Add files that were accidentally skipped due to the .gitignore file
author Jeremy Mordkoff <Jeremy.Mordkoff@riftio.com>
Fri, 1 Apr 2016 21:06:22 +0000 (17:06 -0400)
committer Jeremy Mordkoff <Jeremy.Mordkoff@riftio.com>
Fri, 1 Apr 2016 21:06:22 +0000 (17:06 -0400)
Signed-off-by: Jeremy Mordkoff <Jeremy.Mordkoff@riftio.com>
80 files changed:
modules/core/rwvx/rwcal/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/Makefile [new file with mode: 0644]
modules/core/rwvx/rwcal/include/riftware/rwcal-api.h [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/rwcalproxytasklet/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/__init__.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/rwcalproxytasklet/rwcalproxytasklet.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/Makefile [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal.vala [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/Makefile [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/__init__.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_drv.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_table.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/delete_vm.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/exceptions.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/prepare_vm.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rwcal_aws.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/Makefile [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/__init__.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/core.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/exceptions.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/image.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lvm.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lxc.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/net.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/shell.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rwcal_cloudsim.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/test/cloudsim_module_test.py [new file with mode: 0755]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsimproxy/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsimproxy/Makefile [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsimproxy/rwcal_cloudsimproxy.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_mock/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_mock/Makefile [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_mock/rwcal_mock.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_openmano/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_openmano/Makefile [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_openmano/rwcal_openmano.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/Makefile [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_vsphere/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_vsphere/Makefile [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_vsphere/rift/vsphere/vsphere.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_vsphere/rwcal_vsphere.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_zk/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/vala/rwcal_zk/rwcal_zk.py [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/yang/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/yang/Makefile [new file with mode: 0644]
modules/core/rwvx/rwcal/plugins/yang/rwcal.yang [new file with mode: 0755]
modules/core/rwvx/rwcal/rift/cal/rwzk.py [new file with mode: 0644]
modules/core/rwvx/rwcal/src/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/src/Makefile [new file with mode: 0644]
modules/core/rwvx/rwcal/src/rwcal_py.c [new file with mode: 0644]
modules/core/rwvx/rwcal/src/rwcal_rwzk.c [new file with mode: 0644]
modules/core/rwvx/rwcal/src/rwvim.py [new file with mode: 0755]
modules/core/rwvx/rwcal/test/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/test/RIFT.ware-ready.py [new file with mode: 0755]
modules/core/rwvx/rwcal/test/aws_resources.py [new file with mode: 0644]
modules/core/rwvx/rwcal/test/cal_module_test/CMakeLists.txt [new file with mode: 0644]
modules/core/rwvx/rwcal/test/cal_module_test/cal_module_test [new file with mode: 0755]
modules/core/rwvx/rwcal/test/cal_module_test/pytest/cal_module_test.py [new file with mode: 0644]
modules/core/rwvx/rwcal/test/cal_module_test/pytest/conftest.py [new file with mode: 0644]
modules/core/rwvx/rwcal/test/cal_module_test/racfg/cal_module_test.racfg [new file with mode: 0644]
modules/core/rwvx/rwcal/test/cloudtool_cal.py [new file with mode: 0755]
modules/core/rwvx/rwcal/test/ec2.py [new file with mode: 0644]
modules/core/rwvx/rwcal/test/openstack_resources.py [new file with mode: 0755]
modules/core/rwvx/rwcal/test/rwcal_callback_gtest.cpp [new file with mode: 0644]
modules/core/rwvx/rwcal/test/rwcal_dump.cpp [new file with mode: 0644]
modules/core/rwvx/rwcal/test/rwcal_zk_gtest.cpp [new file with mode: 0644]
modules/core/rwvx/rwcal/test/test_container_cal.py [new file with mode: 0644]
modules/core/rwvx/rwcal/test/test_openstack_install.py [new file with mode: 0644]
modules/core/rwvx/rwcal/test/test_rwcal_openstack.py [new file with mode: 0644]
modules/core/rwvx/rwcal/test/test_rwlxc_rwlaunchpad.py [new file with mode: 0644]

diff --git a/modules/core/rwvx/rwcal/CMakeLists.txt b/modules/core/rwvx/rwcal/CMakeLists.txt
new file mode 100644 (file)
index 0000000..9816a7d
--- /dev/null
@@ -0,0 +1,35 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 2014/05/22
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME rwcal)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+set(subdirs src plugins test)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
+install(FILES include/riftware/rwcal-api.h
+  DESTINATION usr/include/riftware
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+    PROGRAMS
+    etc/userdata-template
+  DESTINATION etc
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+
+rift_python_install_tree(
+  FILES
+    rift/cal/rwzk.py
+  PYTHON3_ONLY
+  COMPONENT rwcal-1.0)
+
diff --git a/modules/core/rwvx/rwcal/Makefile b/modules/core/rwvx/rwcal/Makefile
new file mode 100644 (file)
index 0000000..f68ec52
--- /dev/null
@@ -0,0 +1,24 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 05/22/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest Makefile.top in this or any parent directory
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/rwvx/rwcal/include/riftware/rwcal-api.h b/modules/core/rwvx/rwcal/include/riftware/rwcal-api.h
new file mode 100644 (file)
index 0000000..9765b16
--- /dev/null
@@ -0,0 +1,800 @@
+
+/*
+ * 
+ * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+ *
+ */
+
+
+/**
+ * @file rwcal-api.h
+ * @author Justin Bronder (justin.bronder@riftio.com)
+ * @date 09/29/2014
+ * @brief Top level API include for rwcal submodule
+ */
+
+#ifndef __RWCAL_API_H__
+#define __RWCAL_API_H__
+
+#include <stdbool.h>
+
+#include <libpeas/peas.h>
+
+#include <rwcal.h>
+#include <rwlib.h>
+#include <rw-manifest.pb-c.h>
+#include <rw_vx_plugin.h>
+
+#include "rwlog.h"
+
+__BEGIN_DECLS
+
+struct rwcal_module_s {
+  rw_vx_framework_t * framework;
+  rw_vx_modinst_common_t *mip;
+
+  PeasExtension * zk;
+  RwCalZookeeper * zk_cls;
+  RwCalZookeeperIface * zk_iface;
+
+  PeasExtension * cloud;
+  RwCalCloud * cloud_cls;
+  RwCalCloudIface * cloud_iface;
+
+  rwlog_ctx_t *rwlog_instance;
+};
+typedef struct rwcal_module_s * rwcal_module_ptr_t;
+
+/* Type generated by vala, see rwcal.vala RwCal::Closure */
+typedef RwCalClosure * rwcal_closure_ptr_t;
+
+// Redefine yang autonames
+typedef RWPB_E(RwManifest_RwcalCloudType) rwcal_cloud_type;
+
+/*
+ * Allocate a rwcal module.  Once allocated, the clients within
+ * the module still need to be initialized.  For rwzk, see
+ * rwcal_rwzk_{kazoo,zake}_init().  For rwcloud, see
+ * rwcal_cloud_init().  It is a fatal error to attempt to use any
+ * client before it has been initialized.  However, it is
+ * perfectly fine to not initialize a client that will remain
+ * unused.  Note that every function contains the client that it
+ * will use as part of the name, just after the rwcal_ prefix.
+ *
+ * @return - rwcal module handle or NULL on failure.
+ */
+rwcal_module_ptr_t rwcal_module_alloc();
+
+/*
+ * Deallocate a rwcal module.
+ *
+ * @param - pointer to the rwcal module to be deallocated.
+ */
+void rwcal_module_free(rwcal_module_ptr_t * rwcal);
+
+/*
+ * Create a rwcal closure.  The closure can be passed as an argument to python
+ * functions which expect a callback.  Then, when python triggers its callback,
+ * the closure will execute the function that was passed in.  If the function
+ * returns anything but RW_STATUS_SUCCESS, the corresponding exception will
+ * be raised in python (RWError*), where it can be handled normally by python.
+ *
+ * Callbacks must match the declaration:
+ *  rw_status_t callback(rwcal_module_ptr_t rwcal, void * user_data, int length);
+ * The first parameter will be set to the rwcal instance that created this
+ * closure.  The second is user specified; the third is the integer length
+ * argument supplied by the plugin (see the rwcal_callback delegate in rwcal.vala).
+ *
+ * @param rwcal     - module handle.
+ * @param callback  - callback to execute when python's callback is triggered.
+ * @param user_data - passed as the second argument to the callback.
+ * @return          - rwcal closure instance or NULL on error.
+ */
+rwcal_closure_ptr_t rwcal_closure_alloc(
+    rwcal_module_ptr_t rwcal,
+    rw_status_t (*callback)(rwcal_module_ptr_t, void *, int),
+    void * user_data);
+
+/*
+ * Deallocate a rwcal closure.  Note that any user data that was passed in will
+ * not be touched.  It is up to the caller to handle that.  On return, closure
+ * will be set to NULL.
+ *
+ * @param closure - pointer to closure to deallocate.
+ */
+void rwcal_closure_free(rwcal_closure_ptr_t * closure);
+
+/*
+ * Create the zookeeper server configuration.
+ *
+ * @param rwcal         - module handle.
+ * @param id            - identifier of this server in the zookeeper ensemble.
+ * @param unique_ports  - generate port number based on UID.
+ * @param server_names  - NULL terminated list of zookeeper servers.
+ * @return              - rift_status.
+ */
+rw_status_t rwcal_rwzk_create_server_config(
+    rwcal_module_ptr_t rwcal,
+    int id,
+    bool unique_ports,
+    const char ** server_names);
+
+/*
+ * Start the zookeeper server.
+ *
+ * @param rwcal         - module handle.
+ * @param id            - identifier of this server in the zookeeper ensemble.
+ * @return              - rift_status.
+ */
+rw_status_t rwcal_rwzk_server_start(rwcal_module_ptr_t rwcal, int id);
+
+/*
+ * Initialize rwcal to use a real zookeeper server.  This is done
+ * by using the python Kazoo module.
+ *
+ * @param rwcal         - module handle.
+ * @param unique_ports  - use ports based on UID.
+ * @param server_names  - NULL terminated list of zookeeper servers.
+ * @return              - rift_status.
+ */
+rw_status_t rwcal_rwzk_kazoo_init(rwcal_module_ptr_t rwcal, bool unique_ports, const char ** server_names);
+
+/*
+ * Initialize rwcal to use a fake, in-memory, server.  This is suitable
+ * for fully collapsed RIFT collectives.
+ *
+ * @param rwcal - module handle.
+ * @return      - rift status.
+ */
+rw_status_t rwcal_rwzk_zake_init(rwcal_module_ptr_t rwcal);
+
+/*
+ * Create a zookeeper node
+ *
+ * @param rwcal   - module handle.
+ * @param path    - path to the node to create.
+ * @param closure - callback closure; when provided, the asynchronous
+ *                  flavor of the zookeeper operation is invoked
+ * @return      - RW_STATUS_SUCCESS on creation,
+ *                RW_STATUS_EXISTS if the node already exists,
+ *                RW_STATUS_FAILURE otherwise.
+ */
+rw_status_t rwcal_rwzk_create(rwcal_module_ptr_t rwcal, const char * path,
+                              const rwcal_closure_ptr_t closure);
+
+/*
+ * Check if a zookeeper node exists
+ *
+ * @param rwcal - module handle.
+ * @param path  - path to the node.
+ * @return      - true if the node exists, false otherwise.
+ */
+bool rwcal_rwzk_exists(rwcal_module_ptr_t rwcal, const char * path);
+
+/*
+ * Get data stored at the given zookeeper node.
+ *
+ * @param rwcal   - module_handle.
+ * @param path    - path to node.
+ * @param data    - on success, contains a pointer to a buffer containing the node data.
+ * @param closure - callback closure; when provided, the asynchronous
+ *                  flavor of the zookeeper operation is invoked
+ * @return      - RW_STATUS_SUCCESS,
+ *                RW_STATUS_NOTFOUND if the node doesn't exist,
+ *                RW_STATUS_FAILURE otherwise.
+ */
+rw_status_t rwcal_rwzk_get(
+    rwcal_module_ptr_t rwcal,
+    const char * path,
+    char ** data,
+    const rwcal_closure_ptr_t closure);
+
+/*
+ * Set data stored at the given zookeeper node.
+ *
+ * @param rwcal   - module_handle.
+ * @param path    - path to node.
+ * @param data    - pointer to data to set in the node.
+ * @param closure - callback closure; when provided, the asynchronous
+ *                  flavor of the zookeeper operation is invoked
+ * @return      - RW_STATUS_SUCCESS,
+ *                RW_STATUS_NOTFOUND if the node doesn't exist,
+ *                RW_STATUS_FAILURE otherwise.
+ */
+rw_status_t rwcal_rwzk_set(
+    rwcal_module_ptr_t rwcal,
+    const char * path,
+    const char * data,
+    const rwcal_closure_ptr_t closure);
+
+/*
+ * Get a list of the children of the specified node.
+ *
+ * @param rwcal     - module handle.
+ * @param path      - path to node.
+ * @param children  - On success, NULL-terminated list of children nodes.
+ * @param closure   - callback closure; when provided, the asynchronous
+ *                    flavor of the zookeeper operation is invoked
+ * @return          - RW_STATUS_SUCCESS,
+ *                    RW_STATUS_NOTFOUND if the node doesn't exist,
+ *                    RW_STATUS_FAILURE otherwise.
+ */
+rw_status_t rwcal_rwzk_get_children(
+    rwcal_module_ptr_t rwcal,
+    const char * path,
+    char *** children,
+    const rwcal_closure_ptr_t closure);
+
+/*
+ * Delete a zookeeper node.  Note that similar to 'rmdir' the node must
+ * not have any children.
+ *
+ * @param rwcal   - module handle.
+ * @param path    - path to node.
+ * @param closure - callback closure; when provided, the asynchronous
+ *                  flavor of the zookeeper operation is invoked
+ * @return      - RW_STATUS_SUCCESS,
+ *                RW_STATUS_NOTEMPTY if the node has children,
+ *                RW_STATUS_NOTFOUND if the node doesn't exist,
+ *                RW_STATUS_FAILURE otherwise.
+ */
+rw_status_t rwcal_rwzk_delete(rwcal_module_ptr_t rwcal, const char * path,
+                              const rwcal_closure_ptr_t closure);
+
+/*
+ * Watch a zookeeper node for any data changes as well as creation/deletion
+ * of the node itself.
+ *
+ * @param rwcal   - module handle.
+ * @param path    - path to node to monitor.
+ * @param closure - callback closure; when provided, the asynchronous
+ *                  flavor of the zookeeper operation is invoked
+ * @return        - rw_status_t
+ */
+rw_status_t rwcal_rwzk_register_watcher(
+    rwcal_module_ptr_t rwcal,
+    const char * path,
+    const rwcal_closure_ptr_t closure);
+
+/*
+ * Stop watching a zookeeper node for any changes.
+ *
+ * @param rwcal   - module handle.
+ * @param path    - path to stop monitoring.
+ * @param closure - callback closure to unregister.
+ * @return        - rw_status_t
+ */
+rw_status_t rwcal_rwzk_unregister_watcher(
+    rwcal_module_ptr_t rwcal,
+    const char * path,
+    const rwcal_closure_ptr_t closure);
+
+/*
+ * Lock a node for writing.  This call is reentrant.
+ *
+ * @param rwcal   - module handle.
+ * @param path    - path to lock.
+ * @param timeout - if not NULL, maximum amount of time to wait on acquisition
+ * @return        - RW_STATUS_SUCCESS,
+ *                  RW_STATUS_NOTFOUND if the node does not exist,
+ *                  RW_STATUS_NOTCONNECTED if lock acquisition fails,
+ *                  RW_STATUS_TIMEOUT if acquisition timed out,
+ *                  RW_STATUS_FAILURE otherwise.
+ */
+rw_status_t rwcal_rwzk_lock(
+    rwcal_module_ptr_t rwcal,
+    const char * path,
+    struct timeval * timeout);
+
+/*
+ * Unlock a node for writing.
+ *
+ * @param rwcal - module handle.
+ * @param path  - path to unlock.
+ * @return      - RW_STATUS_SUCCESS,
+ *                RW_STATUS_NOTFOUND if the node does not exist,
+ *                RW_STATUS_NOTCONNECTED if a lock was not previously acquired,
+ *                RW_STATUS_FAILURE otherwise.
+ */
+rw_status_t rwcal_rwzk_unlock(rwcal_module_ptr_t rwcal, const char * path);
+
+/*
+ * Test if a node is locked or not.
+ *
+ * @param rwcal - module handle.
+ * @param path  - path to check for locked status.
+ * @return      - True if the node is locked, false otherwise.
+ */
+bool rwcal_rwzk_locked(rwcal_module_ptr_t rwcal, const char * path);
+
+
+/*
+ * Initialize the rwcal cloud controller.
+ *
+ * key/secret for various cloud types:
+ *  EC2: ACCESS_ID/SECRET_KEY
+ *
+ * @param rwcal       - module handle.
+ * @return        - RW_STATUS_SUCCESS,
+ *                  RW_STATUS_NOTFOUND if the type is unknown,
+ *                  RW_STATUS_FAILURE otherwise.
+ */
+rw_status_t rwcal_cloud_init(rwcal_module_ptr_t rwcal);
+
+/*
+ * Get a list of the available images that can be
+ * used to start a new VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param images      - on success, contains a list of the
+ *                      available images.
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_image_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **images);
+
+/*
+ * Delete Image.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param image_id    - id of image to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_image(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * image_id);
+
+/*
+ * Create a flavor.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param flavor      - rwpb_gi_Rwcal_FlavorInfoItem object describing the
+ *                      flavor to be created
+ * @param flavor_id   - on success, contains a NULL-terminated string containing the new flavor_id
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_flavor(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_FlavorInfoItem *flavor,
+    char *flavor_id);
+
+
+/*
+ * Delete flavor.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param flavor_id   - id of flavor to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_flavor(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * flavor_id);
+
+/*
+ * Get a specific flavor
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param flavor_id   - id of the flavor to return
+ * @param flavor      - rwpb_gi_Rwcal_FlavorInfoItem object containing the
+ *                      details of the requested flavor
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_flavor(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * flavor_id,
+    rwpb_gi_Rwcal_FlavorInfoItem **flavor);
+
+/*
+ * Get a list of the details for all flavors
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param flavors     - on success, contains a list of flavor info objects
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_flavor_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **flavors);
+
+/*
+ * Create a virtual machine.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm          - the information that defines what kind of VM will be
+ *                      created
+ * @param vm_id       - on success, contains a NULL-terminated string
+ *                      containing the new vm id
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VMInfoItem *vm,
+    char **vm_id);
+
+/*
+ * Delete VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm_id       - id of vm to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * vm_id);
+
+/*
+ * Reboot VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm_id       - id of vm to be rebooted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_reboot_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * vm_id);
+
+/*
+ * Start VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm_id       - id of a vm to start
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_start_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * vm_id);
+
+/*
+ * Stop VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm_id       - id of a vm to stop
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_stop_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * vm_id);
+
+/*
+ * Get a list of the names of the available vms
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vms         - on success, contains a NULL-terminated
+ *                      list of vms.
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_vm_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources** vms);
+
+/*
+ * Create a tenant.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param tenant_name - name to assign to the tenant.
+ * @param tenant_info - on success, contains a NULL-terminated list of tenant_info
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_tenant(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * tenant_name,
+    char *** tenant_info);
+
+/*
+ * Delete tenant.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param tenant_id   - id of tenant to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_tenant(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * tenant_id);
+
+/*
+ * Get a list of the available tenants
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param tenants     - on success, contains a NULL-terminated
+ *                      list of tenants.
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_tenant_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **tenants);
+
+/*
+ * Create a role.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param role_name   - name to assign to the role.
+ * @param role_info   - on success, contains a NULL-terminated list of role_info
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_role(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * role_name,
+    char *** role_info);
+
+/*
+ * Delete role.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param role_id     - id of role to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_role(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * role_id);
+
+/*
+ * Get a list of the available roles
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param roles       - on success, contains a NULL-terminated
+ *                      list of roles.
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_role_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **roles);
+
+/*
+ * Add a new host
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param host        - host info
+ * @param host_id     - on success, contains a NULL-terminated string
+ *                      containing the new host_id
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_add_host(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_HostInfoItem *host,
+    char **host_id);
+
+/*
+ * Remove a new host
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param host_id     - the id of the host to remove
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_remove_host(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *host_id);
+
+/*
+ * Get a specific host
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param host_id     - the id of the host to return
+ * @param host        - the requested host info
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_host(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *host_id,
+    rwpb_gi_Rwcal_HostInfoItem **host);
+
+/*
+ * Get a list of hosts
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param hosts       - on success, contains a NULL-terminated list of hosts.
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_host_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **hosts);
+
+/*
+ * Create a new port
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param port        - port info
+ * @param port_id     - on success, contains a NULL-terminated string
+ *                      containing the new port id
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_port(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_PortInfoItem *port,
+    char **port_id);
+
+/*
+ * Delete a port
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param port_id     - the id of the port to remove
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_port(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *port_id);
+
+/*
+ * Get a specific port
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param port_id     - the id of the port to return
+ * @param port        - the requested port info
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_port(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *port_id,
+    rwpb_gi_Rwcal_PortInfoItem **port);
+
+/*
+ * Get a list of ports
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param ports       - on success, contains a NULL-terminated list of ports.
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_port_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **ports);
+
+/*
+ * Create a new network
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param network     - network info
+ * @param network_id  - on success, contains a NULL-terminated string
+ *                      containing the new network id
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_network(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_NetworkInfoItem *network,
+    char **network_id);
+
+/*
+ * Delete a network
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param network_id  - the id of the network to remove
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_network(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *network_id);
+
+/*
+ * Get a specific network
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param network_id  - the id of the network to return
+ * @param network     - the requested network info
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_network(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *network_id,
+    rwpb_gi_Rwcal_NetworkInfoItem **network);
+
+/*
+ * Get the management network
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param network     - the management network info
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_management_network(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_NetworkInfoItem **network);
+
+/*
+ * Get a list of networks
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param networks    - on success, contains a NULL-terminated list of networks.
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_network_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **networks);
+
+/*
+ * Get a RwLog Context so that log messages can go to rwlog
+ *
+ * @param rwcal       - module handle.
+ *
+ * @return            - rwlog_ctx_t
+ */
+rwlog_ctx_t *rwcal_get_rwlog_ctx(rwcal_module_ptr_t rwcal);
+
+
+/*
+ * Get an element from the userdata by index
+ *
+ * @param userdata    - userdata as returned in zk callback functions
+ * @param idx         - index of the element to be picked up
+ * @return            - pointer to the userdata element at idx, or NULL
+ */
+void *rwcal_get_userdata_idx(void *userdata, int idx);
+
+__END_DECLS
+
+#endif
+
+
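Note: the header above declares the C-level CAL API. Within this same commit the equivalent functionality is reached from Python by loading the RwCal plugin through libpeas, which is how rwcalproxytasklet.py (further down) drives it. The following is a minimal, illustrative sketch only: the plugin name, the "Cloud" interface, the init() call, and the CloudAccount type are taken from that tasklet; the log handle and the exact return shape are assumptions.

    import rw_peas
    import gi
    gi.require_version('RwcalYang', '1.0')
    from gi.repository import RwcalYang

    # Load the RwCal-1.0 "Cloud" interface the same way rwcalproxytasklet.py does.
    plugin = rw_peas.PeasPlugin('rwcal_cloudsim', 'RwCal-1.0')
    cal = plugin.get_interface("Cloud")
    cal.init(log_hdl)  # log_hdl: an rwlog handle, e.g. a tasklet's self.log_hdl (assumed available)

    account = RwcalYang.CloudAccount(account_type="cloudsim")
    # CAL methods return an RwStatus first, mirroring the rw_status_t returns above.
    status, vms = cal.get_vm_list(account)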
diff --git a/modules/core/rwvx/rwcal/plugins/CMakeLists.txt b/modules/core/rwvx/rwcal/plugins/CMakeLists.txt
new file mode 100644 (file)
index 0000000..c009235
--- /dev/null
@@ -0,0 +1,11 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 2014/05/22
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs vala yang rwcalproxytasklet)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/modules/core/rwvx/rwcal/plugins/rwcalproxytasklet/CMakeLists.txt b/modules/core/rwvx/rwcal/plugins/rwcalproxytasklet/CMakeLists.txt
new file mode 100644 (file)
index 0000000..8107df4
--- /dev/null
@@ -0,0 +1,19 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwcalproxytasklet rwcalproxytasklet.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/rwcalproxytasklet/__init__.py
+    rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py
+  COMPONENT rwcalproxytasklet-1.0
+  PYTHON3_ONLY)
diff --git a/modules/core/rwvx/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/__init__.py b/modules/core/rwvx/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/__init__.py
new file mode 100644 (file)
index 0000000..94af0b3
--- /dev/null
@@ -0,0 +1 @@
+from .rwcalproxytasklet import RwCalProxyTasklet
diff --git a/modules/core/rwvx/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py b/modules/core/rwvx/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py
new file mode 100644 (file)
index 0000000..b6cbf56
--- /dev/null
@@ -0,0 +1,619 @@
+"""
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+@file rwcalproxytasklet.py
+@author Austin Cormier (austin.cormier@riftio.com)
+@date 2015-10-20
+"""
+
+import asyncio
+import collections
+import concurrent.futures
+import logging
+import os
+import sys
+
+import tornado
+import tornado.httpserver
+import tornado.web
+import tornado.platform.asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwcalYang,
+    RwTypes,
+)
+
+import rw_peas
+import rift.tasklets
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class CalCallFailure(Exception):
+    pass
+
+
+class RPCParam(object):
+    def __init__(self, key, proto_type=None):
+        self.key = key
+        self.proto_type = proto_type
+
+
+class CalRequestHandler(tornado.web.RequestHandler):
+    def initialize(self, log, loop, cal, account, executor, cal_method,
+                   input_params=None, output_params=None):
+        self.log = log
+        self.loop = loop
+        self.cal = cal
+        self.account = account
+        self.executor = executor
+        self.cal_method = cal_method
+        self.input_params = input_params
+        self.output_params = output_params
+
+    def wrap_status_fn(self, fn, *args, **kwargs):
+        ret = fn(*args, **kwargs)
+        if not isinstance(ret, collections.Iterable):
+            ret = [ret]
+
+        rw_status = ret[0]
+        if type(rw_status) != RwTypes.RwStatus:
+            raise ValueError("First return value of %s function was not a RwStatus" %
+                             fn.__name__)
+
+        if rw_status != RwTypes.RwStatus.SUCCESS:
+            msg = "%s returned %s" % (fn.__name__, str(rw_status))
+            self.log.error(msg)
+            raise CalCallFailure(msg)
+
+        return ret[1:]
+
+    @tornado.gen.coroutine
+    def post(self):
+        def body_to_cal_args():
+            cal_args = []
+            if self.input_params is None:
+                return cal_args
+
+            input_dict = tornado.escape.json_decode(self.request.body)
+            if len(input_dict) != len(self.input_params):
+                raise ValueError("Got %s parameters, expected %s" %
+                                 (len(input_dict), len(self.input_params)))
+
+            for input_param in self.input_params:
+                key = input_param.key
+                value = input_dict[key]
+                proto_type = input_param.proto_type
+
+                if proto_type is not None:
+                    proto_cls = getattr(RwcalYang, proto_type)
+                    self.log.debug("Deserializing into %s type", proto_cls)
+                    value = proto_cls.from_dict(value)
+
+                cal_args.append(value)
+
+            return cal_args
+
+        def cal_return_vals(return_vals):
+            output_params = self.output_params
+            if output_params is None:
+                output_params = []
+
+            if len(return_vals) != len(output_params):
+                raise ValueError("Got %s return values, expected %s" %
+                                 (len(return_vals), len(output_params)))
+
+            write_dict = {"return_vals": []}
+            for i, output_param in enumerate(output_params):
+                key = output_param.key
+                proto_type = output_param.proto_type
+                output_value = return_vals[i]
+
+                if proto_type is not None:
+                    output_value = output_value.as_dict()
+
+                return_val = {
+                        "key": key,
+                        "value": output_value,
+                        "proto_type": proto_type,
+                        }
+
+                write_dict["return_vals"].append(return_val)
+
+            return write_dict
+
+        @asyncio.coroutine
+        def handle_request():
+            self.log.debug("Got cloudsimproxy POST request: %s", self.request.body)
+            cal_args = body_to_cal_args()
+
+            # Execute the CAL request in a separate thread to prevent
+            # blocking the main loop.
+            return_vals = yield from self.loop.run_in_executor(
+                    self.executor,
+                    self.wrap_status_fn,
+                    getattr(self.cal, self.cal_method),
+                    self.account,
+                    *cal_args
+                    )
+
+            return cal_return_vals(return_vals)
+
+        f = asyncio.ensure_future(handle_request(), loop=self.loop)
+        return_dict = yield tornado.platform.asyncio.to_tornado_future(f)
+
+        self.log.debug("Responding to %s RPC with %s", self.cal_method, return_dict)
+
+        self.clear()
+        self.set_status(200)
+        self.write(return_dict)
+
+
+class CalProxyApp(tornado.web.Application):
+    def __init__(self, log, loop, cal_interface, cal_account):
+        self.log = log
+        self.loop = loop
+        self.cal = cal_interface
+        self.account = cal_account
+
+        attrs = dict(
+            log=self.log,
+            loop=self.loop,
+            cal=cal_interface,
+            account=cal_account,
+            # Create an executor with a single worker to prevent
+            # having multiple simultaneous calls into CAL (which is not thread-safe)
+            executor=concurrent.futures.ThreadPoolExecutor(1)
+            )
+
+        def mk_attrs(cal_method, input_params=None, output_params=None):
+            new_attrs = {
+                    "cal_method": cal_method,
+                    "input_params": input_params,
+                    "output_params": output_params
+                    }
+            new_attrs.update(attrs)
+
+            return new_attrs
+
+        super(CalProxyApp, self).__init__([
+            (r"/api/get_image_list", CalRequestHandler,
+                mk_attrs(
+                    cal_method="get_image_list",
+                    output_params=[
+                        RPCParam("images", "VimResources"),
+                        ]
+                    ),
+                ),
+
+            (r"/api/create_image", CalRequestHandler,
+                mk_attrs(
+                    cal_method="create_image",
+                    input_params=[
+                        RPCParam("image", "ImageInfoItem"),
+                        ],
+                    output_params=[
+                        RPCParam("image_id"),
+                        ]
+                    ),
+                ),
+
+            (r"/api/delete_image", CalRequestHandler,
+                mk_attrs(
+                    cal_method="delete_image",
+                    input_params=[
+                        RPCParam("image_id"),
+                        ],
+                    ),
+                ),
+
+            (r"/api/get_image", CalRequestHandler,
+                mk_attrs(
+                    cal_method="get_image",
+                    input_params=[
+                        RPCParam("image_id"),
+                        ],
+                    output_params=[
+                        RPCParam("image", "ImageInfoItem"),
+                        ],
+                    ),
+                ),
+
+            (r"/api/create_vm", CalRequestHandler,
+                mk_attrs(
+                    cal_method="create_vm",
+                    input_params=[
+                        RPCParam("vm", "VMInfoItem"),
+                        ],
+                    output_params=[
+                        RPCParam("vm_id"),
+                        ],
+                    ),
+                ),
+
+            (r"/api/start_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="start_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/stop_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="stop_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/reboot_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="reboot_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vm_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vm_list",
+                        output_params=[
+                            RPCParam("vms", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        output_params=[
+                            RPCParam("vms", "VMInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_flavor", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_flavor",
+                        input_params=[
+                            RPCParam("flavor", "FlavorInfoItem"),
+                            ],
+                        output_params=[
+                            RPCParam("flavor_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_flavor", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_flavor",
+                        input_params=[
+                            RPCParam("flavor_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_flavor_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_flavor_list",
+                        output_params=[
+                            RPCParam("flavors", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_flavor", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_flavor",
+                        input_params=[
+                            RPCParam("flavor_id"),
+                            ],
+                        output_params=[
+                            RPCParam("flavor", "FlavorInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_network",
+                        input_params=[
+                            RPCParam("network", "NetworkInfoItem"),
+                            ],
+                        output_params=[
+                            RPCParam("network_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_network",
+                        input_params=[
+                            RPCParam("network_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_network",
+                        input_params=[
+                            RPCParam("network_id"),
+                            ],
+                        output_params=[
+                            RPCParam("network", "NetworkInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_network_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_network_list",
+                        output_params=[
+                            RPCParam("networks", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_management_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_management_network",
+                        output_params=[
+                            RPCParam("network", "NetworkInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_port", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_port",
+                        input_params=[
+                            RPCParam("port", "PortInfoItem"),
+                            ],
+                        output_params=[
+                            RPCParam("port_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_port", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_port",
+                        input_params=[
+                            RPCParam("port_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_port", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_port",
+                        input_params=[
+                            RPCParam("port_id"),
+                            ],
+                        output_params=[
+                            RPCParam("port", "PortInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_port_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_port_list",
+                        output_params=[
+                            RPCParam("ports", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_virtual_link", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_virtual_link",
+                        input_params=[
+                            RPCParam("link_params", "VirtualLinkReqParams"),
+                            ],
+                        output_params=[
+                            RPCParam("link_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_virtual_link", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_virtual_link",
+                        input_params=[
+                            RPCParam("link_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_virtual_link", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_virtual_link",
+                        input_params=[
+                            RPCParam("link_id"),
+                            ],
+                        output_params=[
+                            RPCParam("response", "VirtualLinkInfoParams"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_virtual_link_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_virtual_link_list",
+                        output_params=[
+                            RPCParam("resources", "VNFResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_vdu",
+                        input_params=[
+                            RPCParam("vdu_params", "VDUInitParams"),
+                            ],
+                        output_params=[
+                            RPCParam("vdu_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/modify_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="modify_vdu",
+                        input_params=[
+                            RPCParam("vdu_params", "VDUModifyParams"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_vdu",
+                        input_params=[
+                            RPCParam("vdu_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vdu",
+                        input_params=[
+                            RPCParam("vdu_id"),
+                            ],
+                        output_params=[
+                            RPCParam("response", "VDUInfoParams"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vdu_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vdu_list",
+                        output_params=[
+                            RPCParam("resources", "VNFResources"),
+                            ],
+                        ),
+                    )
+            ])
+
+
+class RwCalProxyTasklet(rift.tasklets.Tasklet):
+    HTTP_PORT = 9002
+    cal_interface = None
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        self.app = None
+        self.server = None
+
+    def get_cal_interface(self):
+        if RwCalProxyTasklet.cal_interface is None:
+            plugin = rw_peas.PeasPlugin('rwcal_cloudsim', 'RwCal-1.0')
+            engine, info, extension = plugin()
+
+            RwCalProxyTasklet.cal_interface = plugin.get_interface("Cloud")
+            RwCalProxyTasklet.cal_interface.init(self.log_hdl)
+
+        return RwCalProxyTasklet.cal_interface
+
+    def start(self):
+        """Tasklet entry point"""
+        self.log.setLevel(logging.DEBUG)
+
+        super().start()
+
+        cal = self.get_cal_interface()
+        account = RwcalYang.CloudAccount(account_type="cloudsim")
+
+        self.app = CalProxyApp(self.log, self.loop, cal, account)
+        self._dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                RwcalYang.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+        self.server = tornado.httpserver.HTTPServer(
+                self.app,
+                io_loop=io_loop,
+                )
+
+        self.log.info("Starting Cal Proxy Http Server on port %s",
+                      RwCalProxyTasklet.HTTP_PORT)
+        self.server.listen(RwCalProxyTasklet.HTTP_PORT)
+
+    def stop(self):
+        try:
+            self.server.stop()
+            self._dts.deinit()
+        except Exception:
+            print("Caught Exception in LP stop:", sys.exc_info()[0])
+            raise
+
+    @asyncio.coroutine
+    def init(self):
+        pass
+
+    @asyncio.coroutine
+    def run(self):
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Take action according to current dts state to transition
+        application into the corresponding application state
+
+        Arguments
+            state - current dts state
+        """
+
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self._dts.handle.set_state(next_state)
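Note: each route registered in CalProxyApp above accepts an HTTP POST whose JSON body supplies the declared input_params and whose JSON response carries a "return_vals" list, as produced by CalRequestHandler.post(). A small client sketch using only the Python standard library; the localhost address is an assumption, while the port comes from RwCalProxyTasklet.HTTP_PORT and the payload/response shapes from the handler code above.

    import json
    import urllib.request

    # get_vm_list declares no input_params, so an empty JSON object is sufficient.
    req = urllib.request.Request(
        "http://127.0.0.1:9002/api/get_vm_list",          # HTTP_PORT = 9002
        data=json.dumps({}).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        reply = json.loads(resp.read().decode("utf-8"))

    # CalRequestHandler.post() responds with {"return_vals": [{"key", "value", "proto_type"}, ...]}
    for rval in reply["return_vals"]:
        print(rval["key"], rval["proto_type"])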
diff --git a/modules/core/rwvx/rwcal/plugins/rwcalproxytasklet/rwcalproxytasklet.py b/modules/core/rwvx/rwcal/plugins/rwcalproxytasklet/rwcalproxytasklet.py
new file mode 100644 (file)
index 0000000..cafdc21
--- /dev/null
@@ -0,0 +1,17 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwcalproxytasklet
+
+
+class Tasklet(rift.tasklets.rwcalproxytasklet.RwCalProxyTasklet):
+    pass
+
+# vim: sw=4
diff --git a/modules/core/rwvx/rwcal/plugins/vala/CMakeLists.txt b/modules/core/rwvx/rwcal/plugins/vala/CMakeLists.txt
new file mode 100644 (file)
index 0000000..c095d97
--- /dev/null
@@ -0,0 +1,63 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf / Anil Gunturu
+# Creation Date: 05/22/2014
+# 
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwcal)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwCal-${VALA_VERSION})
+
+rift_add_vala(
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES
+    rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0
+    rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
+    rw_log-1.0
+  VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwschema/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwvcs/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwlog/src
+  GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwschema/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwvcs/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwlog/src
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  DEPENDS rwcal_yang rwlog_gi rwschema_yang rwmanifest_yang
+  )
+
+rift_install_vala_artifacts(
+  HEADER_FILES ${VALA_NAME}.h
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+  )
+
+
+set(subdirs
+  rwcal_zk
+  rwcal_cloudsim
+  rwcal_cloudsimproxy
+  rwcal_mock
+  rwcal_openstack
+  rwcal_openmano
+  rwcal_aws
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/modules/core/rwvx/rwcal/plugins/vala/Makefile b/modules/core/rwvx/rwcal/plugins/vala/Makefile
new file mode 100644 (file)
index 0000000..345c5f3
--- /dev/null
@@ -0,0 +1,24 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal.vala b/modules/core/rwvx/rwcal/plugins/vala/rwcal.vala
new file mode 100644 (file)
index 0000000..c7a497c
--- /dev/null
@@ -0,0 +1,321 @@
+namespace RwCal {
+
+  public static delegate void rwcal_callback(void * rwcal, void * user_data, int length);
+  public class Closure: GLib.Object {
+    public void * m_rwcal;
+    public void * m_user_data;
+    [CCode (array_length = false, array_null_terminated = true)]
+    public string [] m_data;
+    public rwcal_callback m_callback;
+
+    public void store_data([CCode (array_length = false, array_null_terminated = true)]
+                           string [] data_in) {
+      int idx = 0;
+      m_data = new string[data_in.length];
+      while (idx < data_in.length) {
+        m_data[idx] = data_in[idx];
+        idx = idx + 1;
+      }
+    }
+    public void callback() {
+      void * [] ret_ptrs = {};
+      ret_ptrs += m_user_data;
+      int idx = 0;
+      while (idx < m_data.length) {
+        ret_ptrs += m_data[idx];
+        idx = idx + 1;
+      }
+      m_callback(m_rwcal, ret_ptrs, ret_ptrs.length);
+    }
+  }
+
+  public interface Zookeeper: GLib.Object {
+    /* These functions are just wrappers on what is exposed by the python
+     * module rwcal.rwzk.  See there for actual documentation.
+     */
+
+    public abstract RwTypes.RwStatus create_server_config(
+      int id,
+      bool unique_ports,
+      [CCode (array_length = false, array_null_terminated = true)]
+      string [] server_names);
+    public abstract RwTypes.RwStatus server_start(int id);
+
+    public abstract RwTypes.RwStatus kazoo_init(
+      bool unique_ports,
+      [CCode (array_length = false, array_null_terminated = true)]
+      string [] server_names);
+
+    public abstract RwTypes.RwStatus zake_init();
+
+    public abstract RwTypes.RwStatus lock(string path, float timeout);
+
+    public abstract RwTypes.RwStatus unlock(string path);
+
+    public abstract bool locked(string path);
+
+    public abstract RwTypes.RwStatus create(string path,
+                                            Closure closure);
+
+    public abstract bool exists(string path);
+
+    public abstract RwTypes.RwStatus get(string path, out string data,
+                                         Closure closure);
+
+    public abstract RwTypes.RwStatus set(string path, string data,
+                                         Closure closure);
+
+    public abstract RwTypes.RwStatus children(
+      string path,
+      [CCode (array_length = false, array_null_terminated = true)]
+      out string [] children,
+      Closure closure);
+
+    public abstract RwTypes.RwStatus rm(string path,
+                                        Closure closure);
+
+    public abstract RwTypes.RwStatus register_watcher(string path,
+                                                      Closure closure);
+
+    public abstract RwTypes.RwStatus unregister_watcher(string path,
+                                                        Closure closure);
+  }
+
+  public interface Cloud: GLib.Object {
+    /*
+     * Init routine
+     */
+    public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx);
+
+    /*
+     * Cloud Account Credentials Validation related API
+     */
+    public abstract RwTypes.RwStatus validate_cloud_creds(
+      Rwcal.CloudAccount account,
+      out Rwcal.CloudConnectionStatus status);
+
+    /*
+     * Image related APIs
+     */
+    public abstract RwTypes.RwStatus get_image_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources images);
+
+    public abstract RwTypes.RwStatus create_image(
+      Rwcal.CloudAccount account,
+      Rwcal.ImageInfoItem image,
+      out string image_id);
+
+    public abstract RwTypes.RwStatus delete_image(
+      Rwcal.CloudAccount account,
+      string image_id);
+
+    public abstract RwTypes.RwStatus get_image(
+        Rwcal.CloudAccount account,
+        string image_id,
+        out Rwcal.ImageInfoItem image);
+
+    /*
+     * VM Related APIs
+     */
+    public abstract RwTypes.RwStatus create_vm(
+      Rwcal.CloudAccount account,
+      Rwcal.VMInfoItem vm,
+      out string vm_id);
+
+    public abstract RwTypes.RwStatus start_vm(
+      Rwcal.CloudAccount account,
+      string vm_id);
+
+    public abstract RwTypes.RwStatus stop_vm(
+      Rwcal.CloudAccount account,
+      string vm_id);
+
+    public abstract RwTypes.RwStatus delete_vm(
+      Rwcal.CloudAccount account,
+      string vm_id);
+
+    public abstract RwTypes.RwStatus reboot_vm(
+      Rwcal.CloudAccount account,
+      string vm_id);
+
+    public abstract RwTypes.RwStatus get_vm_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources vms);
+
+    public abstract RwTypes.RwStatus get_vm(
+      Rwcal.CloudAccount account,
+      string vm_id,
+      out Rwcal.VMInfoItem vm);
+
+    /*
+     * Flavor related APIs
+     */
+    public abstract RwTypes.RwStatus create_flavor(
+      Rwcal.CloudAccount account,
+      Rwcal.FlavorInfoItem flavor_info_item,
+      out string flavor_id);
+
+    public abstract RwTypes.RwStatus delete_flavor(
+      Rwcal.CloudAccount account,
+      string flavor_id);
+
+    public abstract RwTypes.RwStatus get_flavor_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources flavors);
+
+    public abstract RwTypes.RwStatus get_flavor(
+      Rwcal.CloudAccount account,
+      string flavor_id,
+      out Rwcal.FlavorInfoItem flavor);
+
+
+    /*
+     * Tenant related APIs
+     */
+    public abstract RwTypes.RwStatus create_tenant(
+      Rwcal.CloudAccount account,
+      string tenant_name,
+      [CCode (array_length = false, array_null_terminated = true)]
+      out string [] tenant_info);
+
+    public abstract RwTypes.RwStatus delete_tenant(
+      Rwcal.CloudAccount account,
+      string tenant_id);
+
+    public abstract RwTypes.RwStatus get_tenant_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources tenants);
+
+    /*
+     * Role related APIs
+     */
+    public abstract RwTypes.RwStatus create_role(
+      Rwcal.CloudAccount account,
+      string role_name,
+      [CCode (array_length = false, array_null_terminated = true)]
+      out string [] role_info);
+
+    public abstract RwTypes.RwStatus delete_role(
+      Rwcal.CloudAccount account,
+      string role_id);
+
+    public abstract RwTypes.RwStatus get_role_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources roles);
+
+    /*
+     * Port related APIs
+     */
+    public abstract RwTypes.RwStatus create_port(
+      Rwcal.CloudAccount account,
+      Rwcal.PortInfoItem port,
+      out string port_id);
+
+    public abstract RwTypes.RwStatus delete_port(
+      Rwcal.CloudAccount account,
+      string port_id);
+
+    public abstract RwTypes.RwStatus get_port(
+      Rwcal.CloudAccount account,
+      string port_id,
+      out Rwcal.PortInfoItem port);
+
+    public abstract RwTypes.RwStatus get_port_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources ports);
+
+    /*
+     * Host related APIs
+     */
+    public abstract RwTypes.RwStatus add_host(
+      Rwcal.CloudAccount account,
+      Rwcal.HostInfoItem host,
+      out string host_id);
+
+    public abstract RwTypes.RwStatus remove_host(
+      Rwcal.CloudAccount account,
+      string host_id);
+
+    public abstract RwTypes.RwStatus get_host(
+      Rwcal.CloudAccount account,
+      string host_id,
+      out Rwcal.HostInfoItem host);
+
+    public abstract RwTypes.RwStatus get_host_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources hosts);
+
+    /*
+     * Network related APIs
+     */
+    public abstract RwTypes.RwStatus create_network(
+      Rwcal.CloudAccount account,
+      Rwcal.NetworkInfoItem network,
+      out string network_id);
+
+    public abstract RwTypes.RwStatus delete_network(
+      Rwcal.CloudAccount account,
+      string network_id);
+
+    public abstract RwTypes.RwStatus get_network(
+      Rwcal.CloudAccount account,
+      string network_id,
+      out Rwcal.NetworkInfoItem network);
+
+    public abstract RwTypes.RwStatus get_network_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources networks);
+
+    public abstract RwTypes.RwStatus get_management_network(
+      Rwcal.CloudAccount account,
+      out Rwcal.NetworkInfoItem network);
+
+    /*
+     * Higher Order CAL APIs
+     */
+    public abstract RwTypes.RwStatus create_virtual_link(
+      Rwcal.CloudAccount account,
+      Rwcal.VirtualLinkReqParams link_params,
+      out string link_id);
+    
+    public abstract RwTypes.RwStatus delete_virtual_link(
+      Rwcal.CloudAccount account,
+      string link_id);
+
+    public abstract RwTypes.RwStatus get_virtual_link(
+      Rwcal.CloudAccount account,
+      string link_id,
+      out Rwcal.VirtualLinkInfoParams response);
+
+    public abstract RwTypes.RwStatus get_virtual_link_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VNFResources resources);
+
+
+    public abstract RwTypes.RwStatus create_vdu(
+      Rwcal.CloudAccount account,
+      Rwcal.VDUInitParams vdu_params,
+      out string vdu_id);
+
+    public abstract RwTypes.RwStatus modify_vdu(
+      Rwcal.CloudAccount account,
+      Rwcal.VDUModifyParams vdu_params);
+    
+    public abstract RwTypes.RwStatus delete_vdu(
+      Rwcal.CloudAccount account,
+      string vdu_id);
+
+    public abstract RwTypes.RwStatus get_vdu(
+      Rwcal.CloudAccount account,
+      string vdu_id,
+      out Rwcal.VDUInfoParams response);
+    
+    public abstract RwTypes.RwStatus get_vdu_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VNFResources resources);
+    
+  }
+}
+
+
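The Cloud interface above is what Python code sees through GObject introspection; the proxy tasklet earlier in this change loads it with rw_peas. A minimal sketch of that consumption pattern follows; the RwcalYang import path and the tuple return convention for out-parameters are assumptions.

    import rw_peas
    from gi.repository import RwcalYang   # assumed GI import path

    plugin = rw_peas.PeasPlugin('rwcal_cloudsim', 'RwCal-1.0')
    cal = plugin.get_interface("Cloud")
    # cal.init(log_hdl) must be called with an rwlog handle before use,
    # as the proxy tasklet does with self.log_hdl.

    account = RwcalYang.CloudAccount(account_type="cloudsim")
    # GI maps 'out' parameters to extra return values, so the call is
    # expected to return (RwStatus, VimResources).
    status, images = cal.get_image_list(account)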
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/CMakeLists.txt b/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/CMakeLists.txt
new file mode 100644 (file)
index 0000000..3289100
--- /dev/null
@@ -0,0 +1,25 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+include(rift_plugin)
+
+set(PKG_NAME rwcal-aws)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+rift_install_python_plugin(rwcal_aws rwcal_aws.py)
+
+rift_python_install_tree(
+  FILES
+    rift/rwcal/aws/__init__.py
+    rift/rwcal/aws/aws_table.py
+    rift/rwcal/aws/aws_drv.py
+    rift/rwcal/aws/exceptions.py
+    rift/rwcal/aws/prepare_vm.py
+    rift/rwcal/aws/delete_vm.py
+  PYTHON3_ONLY
+  COMPONENT ${PKG_LONG_NAME})
+
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/Makefile b/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/Makefile
new file mode 100644 (file)
index 0000000..345c5f3
--- /dev/null
@@ -0,0 +1,24 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/__init__.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/__init__.py
new file mode 100644 (file)
index 0000000..4ce1fa2
--- /dev/null
@@ -0,0 +1 @@
+from .aws_drv import AWSDriver
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_drv.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_drv.py
new file mode 100644 (file)
index 0000000..c816677
--- /dev/null
@@ -0,0 +1,963 @@
+#!/usr/bin/python
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import boto3
+import botocore
+from . import aws_table
+from . import exceptions
+
+import logging
+logger = logging.getLogger('rwcal.aws.drv')
+logger.setLevel(logging.DEBUG)
+
+class AWSDriver(object):
+    """
+    Driver for AWS
+    """
+    def __init__(self, key, secret, region,ssh_key=None,vpcid = None,availability_zone = None,default_subnet_id = None):
+        """
+          Constructor for AWSDriver
+          Arguments:
+             key    : AWS user access key
+             secret : AWS user access secret
+             region : AWS region
+             ssh_key: Name of key pair to connect to EC2 instance
+             vpcid  : VPC ID for the resources
+             availability_zone: Availability zone to allocate the EC2 instance.
+             default_subnet_id: Default subnet id to be used for the EC2 instance interfaces at instance creation time
+          Returns: AWS Driver Object 
+        """
+        self._access_key    = key
+        self._access_secret = secret
+        self._region        = region
+        self._availability_zone =  availability_zone
+        self._ssh_key       = ssh_key
+        
+        self._sess  = boto3.session.Session(aws_access_key_id = self._access_key,
+                                            aws_secret_access_key = self._access_secret,
+                                            region_name = self._region)
+        self._ec2_resource_handle = self._sess.resource(service_name = 'ec2')
+        self._s3_handle  = self._sess.resource(service_name = 's3')
+        self._iam_handle = self._sess.resource(service_name = 'iam')
+
+        self._acct_arn = self._iam_handle.CurrentUser().arn
+        self._account_id = self._acct_arn.split(':')[4]
+        # If VPC id is not passed; use default VPC for the account 
+        if vpcid is None:
+            self._vpcid = self._default_vpc_id
+        else:
+            self._vpcid  = vpcid
+
+        self._default_subnet_id = default_subnet_id 
+        # If default_subnet_id is not passed, get the default subnet for the AZ.
+        # We use this to create the first network interface at instance creation time. This subnet typically should
+        # have an associated public address so the instance gets a public address.
+        if default_subnet_id is None:
+            self._default_subnet_id = self._get_default_subnet_id_for_az 
+           
+       
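A minimal usage sketch for the constructor above; the credential, region, and key-pair values are placeholders, not defaults of this module.

    from rift.rwcal.aws import AWSDriver

    # Placeholder credentials/region; substitute real account values.
    drv = AWSDriver(key='AKIAXXXXXXXXXXXXXXXX',
                    secret='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
                    region='us-east-1',
                    ssh_key='my-keypair',
                    availability_zone='us-east-1a')
    print([image.id for image in drv.list_images()])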
+    @property
+    def default_subnet_id(self):
+        """
+           Returns default subnet id for account
+        """
+        return self._default_subnet_id
+
+    @property
+    def _ec2_client_handle(self):
+        """
+        Low level EC2 client connection handle
+           Arguments: None
+           Returns: EC2 Client Connection Handle
+        """
+        return self._ec2_resource_handle.meta.client
+
+    @property
+    def _default_vpc_id(self):
+        """
+        Method to get Default VPC ID
+          Arguments: None
+          Returns: Default EC2.Vpc Resource ID for AWS account
+        """
+        return self._default_vpc.vpc_id
+
+    @property
+    def _default_vpc(self):
+        """
+        Method to get Default VPC Resource Object
+           Arguments: None
+           Returns: Default EC2.Vpc Resource for AWS account
+        """
+        try:
+           response = list(self._ec2_resource_handle.vpcs.all())
+        except Exception as e:
+            logger.error("AWSDriver: Get of Default VPC failed with exception: %s" %(repr(e)))
+            raise
+        default_vpc = [vpc for vpc in response if vpc.is_default]
+        assert(len(default_vpc) == 1)
+        return default_vpc[0]
+
+    def _get_vpc_info(self,VpcId):
+        """
+        Get Vpc resource for the specified VpcId
+          Arguments:
+            - VpcId (String) : VPC ID
+          Returns: EC2.Vpc Resource
+        """ 
+        VpcIds = list()
+        VpcIds.append(VpcId)
+        response = list(self._ec2_resource_handle.vpcs.filter(
+                                               VpcIds = VpcIds))
+        if response:
+            assert(len(response) == 1)
+            return response[0]
+        return None
+
+
+    def upload_image(self, **kwargs):
+        """
+        Upload image to s3
+          Arguments: **kwargs -- dictionary
+               {
+                 'image_path'          : File location for the image,
+                 'image_prefix'        : Name-Prefix of the image on S3 
+                 'public_key'          : The path to the user's PEM encoded RSA public key certificate file,
+                 'private_key'         : The path to the user's PEM encoded RSA private key file,
+                 'arch'                : One of ["i386", "x86_64"],
+                 's3_bucket'           : Name of S3 bucket where this image should be uploaded
+                                         (e.g. 'Rift.Cal' or 'Rift.VNF' or 'Rift.3rdPartyVM' etc)
+                 'kernelId'            : Id of the default kernel to launch the AMI with (OPTIONAL)
+                 'ramdiskId'           : Id of the default ramdisk to launch the AMI with (OPTIONAL)
+                 'block_device_mapping' : block_device_mapping string (OPTIONAL)
+                                         Default block-device-mapping scheme to launch the AMI with. This scheme
+                                         defines how block devices may be exposed to an EC2 instance of this AMI
+                                         if the instance-type of the instance is entitled to the specified device.
+                                         The scheme is a comma-separated list of key=value pairs, where each key
+                                         is a "virtual-name" and each value, the corresponding native device name
+                                         desired. Possible virtual-names are:
+                                         - "ami": denotes the root file system device, as seen by the instance.
+                                         - "root": denotes the root file system device, as seen by the kernel.
+                                         - "swap": denotes the swap device, if present.
+                                         - "ephemeralN": denotes Nth ephemeral store; N is a non-negative integer.
+                                          Note that the contents of the AMI form the root file system. Samples of
+                                          block-device-mappings are:
+                                          '"ami=sda1","root=/dev/sda1","ephemeral0=sda2","swap=sda3"'
+                                          '"ami=0","root=/dev/dsk/c0d0s0","ephemeral0=1"'
+               }
+          Returns: None
+        """
+        import subprocess
+        import tempfile
+        import os
+        import shutil
+        
+        CREATE_BUNDLE_CMD  = 'ec2-bundle-image --cert {public_key} --privatekey {private_key} --user {account_id} --image {image_path} --prefix {image_prefix} --arch {arch}'
+        UPLOAD_BUNDLE_CMD  = 'ec2-upload-bundle --bucket {bucket} --access-key {key} --secret-key {secret} --manifest {manifest} --region {region} --retry'
+        
+        cmdline = CREATE_BUNDLE_CMD.format(public_key    = kwargs['public_key'],
+                                           private_key   = kwargs['private_key'],
+                                           account_id    = self._account_id,
+                                           image_path    = kwargs['image_path'],
+                                           image_prefix  = kwargs['image_prefix'],
+                                           arch          = kwargs['arch'])
+        
+        if 'kernelId' in kwargs:
+            cmdline += (' --kernel ' + kwargs['kernelId'])
+
+        if 'ramdiskId' in kwargs:
+            cmdline += (' --ramdisk ' + kwargs['ramdiskId'])
+            
+        if 'block_device_mapping' in kwargs:
+            cmdline += ' --block-device-mapping ' + kwargs['block_device_mapping']
+
+        ### Create Temporary Directory
+        try:
+            tmp_dir = tempfile.mkdtemp()
+        except Exception as e:
+            logger.error("Failed to create temporary directory. Exception Details: %s" %(repr(e)))
+            raise
+
+        cmdline += (" --destination " + tmp_dir)
+        logger.info('AWSDriver: Executing ec2-bundle-image command. Target directory name: %s. This command may take a while...\n' %(tmp_dir))
+        result = subprocess.call(cmdline.split())
+        if result == 0:
+            logger.info('AWSDriver: ec2-bundle-image command succeeded')
+        else:
+            logger.error('AWSDriver: ec2-bundle-image command failed. Return code %d. CMD: %s'%(result, cmdline))
+            raise OSError('AWSDriver: ec2-bundle-image command failed. Return code %d' %(result))
+        
+        logger.info('AWSDriver: Initiating image upload. This may take a while...')
+
+        cmdline = UPLOAD_BUNDLE_CMD.format(bucket   = kwargs['s3_bucket'],
+                                           key      = self._access_key,
+                                           secret   = self._access_secret,
+                                           manifest = tmp_dir+'/'+kwargs['image_prefix']+'.manifest.xml',
+                                           region   = self._region)
+        result = subprocess.call(cmdline.split())
+        if result == 0:
+            logger.info('AWSDriver: ec2-upload-bundle command succeeded')
+        else:
+            logger.error('AWSDriver: ec2-upload-bundle command failed. Return code %d. CMD: %s'%(result, cmdline))
+            raise OSError('AWSDriver: ec2-upload-bundle command failed. Return code %d' %(result))
+        ### Delete the temporary directory
+        logger.info('AWSDriver: Deleting temporary directory and other software artifacts')
+        shutil.rmtree(tmp_dir, ignore_errors = True)
+        
+                     
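A sketch of calling upload_image with the keyword arguments documented above; all paths and the bucket name are placeholders, and drv is an AWSDriver instance as constructed earlier.

    # Placeholders only; the ec2-bundle-image/ec2-upload-bundle tools must be installed.
    drv.upload_image(image_path='/tmp/my-image.img',
                     image_prefix='my-image',
                     public_key='/path/to/cert.pem',
                     private_key='/path/to/pk.pem',
                     arch='x86_64',
                     s3_bucket='Rift.Cal')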
+    def register_image(self, **kwargs):
+        """
+        Registers an image uploaded to S3 with EC2
+           Arguments: **kwargs -- dictionary
+             {
+                Name (string)         : Name of the image
+                ImageLocation(string) : Location of image manifest file in S3 (e.g. 'rift.cal.images/test-img.manifest.xml')
+                Description(string)   : Description for the image (OPTIONAL)
+                Architecture (string) : Possible values 'i386' or 'x86_64' (OPTIONAL)
+                KernelId(string)      : Kernel-ID Refer: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs (OPTIONAL)
+                RamdiskId(string)     : Ramdisk-ID Refer: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs (OPTIONAL)
+                RootDeviceName(string): The name of the root device (for example, /dev/sda1 , or /dev/xvda ) (OPTIONAL)
+                BlockDeviceMappings(list) : List of dictionary of block device mapping (OPTIONAL)
+                                            [
+                                               {
+                                                 'VirtualName': 'string',
+                                                 'DeviceName': 'string',
+                                                 'Ebs': {
+                                                    'SnapshotId': 'string',
+                                                    'VolumeSize': 123,
+                                                    'DeleteOnTermination': True|False,
+                                                    'VolumeType': 'standard'|'io1'|'gp2',
+                                                    'Iops': 123,
+                                                    'Encrypted': True|False
+                                                 },
+                                                 'NoDevice': 'string'
+                                              },
+                                            ]
+                VirtualizationType(string): The type of virtualization (OPTIONAL)
+                                           Default: paravirtual
+                SriovNetSupport(string): (OPTIONAL)
+                       Set to ``simple`` to enable enhanced networking for the AMI and any instances that are launched from the AMI.
+                       This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.
+        
+          Returns:
+             image_id: UUID of the image
+        """
+
+        kwargs['DryRun'] = False
+        try:
+            response = self._ec2_client_handle.register_image(**kwargs)
+        except Exception as e:
+            logger.error("AWSDriver: List image operation failed with exception: %s" %(repr(e)))
+            raise
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+        return response['ImageId']
+        
+
+    def deregister_image(self, ImageId):
+        """
+        DeRegisters image from EC2.
+          Arguments:
+            - ImageId (string): ImageId generated by AWS in register_image call
+          Returns: None
+        """
+        try:
+            response = self._ec2_client_handle.deregister_image(
+                                                         ImageId = ImageId)
+        except Exception as e:
+            logger.error("AWSDriver: deregister_image operation failed with exception: %s" %(repr(e)))
+            raise
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+        
+    def get_image(self, ImageId):
+        """
+        Returns a dictionary object describing the Image identified by ImageId
+        """
+        try:
+            response = list(self._ec2_resource_handle.images.filter(ImageIds = [ImageId]))
+        except Exception as e:
+            logger.error("AWSDriver: List image operation failed with exception: %s" %(repr(e)))
+            raise
+        return response[0]
+        
+    def list_images(self):
+        """
+        Returns list of dictionaries. Each dictionary contains attributes associated with image
+           Arguments: None
+           Returns: List of dictionaries.
+        """
+        try:
+            response = list(self._ec2_resource_handle.images.filter(Owners = [self._account_id]))
+        except Exception as e:
+            logger.error("AWSDriver: List image operation failed with exception: %s" %(repr(e)))
+            raise
+        return response
+
+    def create_image_from_instance(self,InstanceId,ImageName,VolumeSize = 16):
+        """
+        Creates AWS AMI from the instance root device Volume and registers the same
+        Caller is expected to stop the instance and restart the instance if required 
+        Arguments:
+           - InstanceId (String) : AWS EC2 Instance Id
+           - ImageName (String)  : Name for AMI
+         Returns
+           - AWS AMI Image Id
+        """
+
+        try:
+            inst = self.get_instance(InstanceId)
+            # Find Volume Id of Root Device
+            if inst.root_device_type == 'ebs':
+                for dev in inst.block_device_mappings:
+                    if inst.root_device_name == dev['DeviceName']:
+                        volume_id = dev['Ebs']['VolumeId']
+                        break
+
+                rsp=self._ec2_resource_handle.create_snapshot(VolumeId=volume_id)
+                snapshot_id = rsp.id
+
+                #Wait for the snapshot to be completed
+                attempts = 0
+                while attempts < 2:
+                    try:
+                        attempts = attempts + 1
+                        waiter = self._ec2_client_handle.get_waiter('snapshot_completed')
+                        waiter.wait(SnapshotIds=[snapshot_id])
+                    except botocore.exceptions.WaiterError as e:
+                        logger.error("AWSDriver: Create Snapshot for image still not completed. Will wait for another iteration") 
+                        continue
+                    except Exception as e:
+                        logger.error("AWSDriver: Createing Snapshot for instance failed during image creation: %s", (repr(e)))
+                        raise
+                    break
+                  
+                logger.debug("AWSDriver: Snapshot %s completed successfully from instance %s",snapshot_id,InstanceId)
+                image_id = self.register_image(Name=ImageName,VirtualizationType='hvm',
+                                               RootDeviceName='/dev/sda1',SriovNetSupport='simple',
+                                               BlockDeviceMappings=[{'DeviceName':'/dev/sda1',
+                                               'Ebs':{'SnapshotId':snapshot_id,'VolumeSize': VolumeSize,
+                                               'VolumeType': 'standard', 'DeleteOnTermination': True}}],
+                                               Architecture='x86_64')
+                return image_id
+            else:
+                logger.error("AWSDriver: Create Image failed as Instance Root device Type should be ebs to create image") 
+                raise exceptions.RWErrorFailure("AWSDriver: Create Image failed as Instance Root device Type should be ebs to create image")
+        except Exception as e:
+            logger.error("AWSDriver: Createing image from instance failed with exception: %s", (repr(e)))
+            raise
+        
+    def list_instances(self):
+        """
+        Returns list of resource object representing EC2 instance.
+           Arguments: None
+           Returns:  List of EC2.Instance object
+        """
+        instance_list = []
+        try:
+            # Skip Instances in terminated state
+            response = self._ec2_resource_handle.instances.filter(
+                                                           Filters = [
+                                                               { 'Name': 'instance-state-name',
+                                                                 'Values': ['pending',
+                                                                            'running',
+                                                                            'shutting-down',
+                                                                            'stopping',
+                                                                            'stopped']
+                                                            }
+                                                           ])
+        except Exception as e:
+            logger.error("AWSDriver: List instances operation failed with exception: %s" %(repr(e)))
+            raise
+        for instance in response:
+             instance_list.append(instance)
+        return instance_list
+
+    def get_instance(self, InstanceId):
+        """
+        Returns a EC2 resource Object describing the Instance identified by InstanceId
+           Arguments:
+             - InstanceId (String) : MANDATORY, EC2 Instance Id
+           Returns: EC2.Instance object
+        """
+
+        try:
+            instance = list(self._ec2_resource_handle.instances.filter(
+                                                           InstanceIds = [InstanceId]))
+        except Exception as e:
+            logger.error("AWSDriver: Get instances operation failed with exception: %s" %(repr(e)))
+            raise
+        if len(instance) == 0:
+            logger.error("AWSDriver: instance with id %s not avaialble" %InstanceId)
+            raise exceptions.RWErrorNotFound("AWSDriver: instance with id %s not avaialble" %InstanceId)
+        elif len(instance) > 1:
+            logger.error("AWSDriver: Duplicate instances with id %s is avaialble" %InstanceId)
+            raise exceptions.RWErrorDuplicate("AWSDriver: Duplicate instances with id %s is avaialble" %InstanceId)
+        return instance[0] 
+
+    def create_instance(self,**kwargs):
+        """
+         Create an EC2 instance.
+            Arguments: **kwargs -- dictionary
+               {
+                  ImageId (string): MANDATORY, Id of AMI to create instance 
+                  SubnetId (string): Id of Subnet to start EC2 instance. EC2 instance will be started in the VPC where the subnet resides. 
+                                    Default subnet from account used if not present
+                  InstanceType(string): AWS Instance Type name. Default: t2.micro
+                  SecurityGroupIds: AWS Security Group Id to associate with the instance. Default from VPC used if not present
+                  KeyName (string): Key pair name. Default key pair from account used if not present 
+                  MinCount (Integer): Minimum number of instance to start. Default: 1
+                  MaxCount (Integer): Maximum number of instance to start. Default: 1
+                  Placement (Dict) : Dictionary having Placement group details
+                                     {AvailabilityZone (String): AZ to create the instance}
+                  UserData (string) : cloud-init config file 
+               }
+            Returns: List of EC2.Instance object
+        """ 
+
+        if 'ImageId' not in kwargs:
+            logger.error("AWSDriver: Mandatory parameter ImageId not available during create_instance")
+            raise AttributeError("Mandatory parameter ImageId not available during create_instance")
+
+        #Validate image exists and is available
+        try:
+            image_res = self._ec2_resource_handle.Image(kwargs['ImageId'])
+            image_res.load() 
+        except Exception as e:
+            logger.error("AWSDriver: Image with id %s not available and failed with exception: %s",kwargs['ImageId'],(repr(e)))
+            raise AttributeError("AWSDriver: Image with id %s not available and failed with exception: %s",kwargs['ImageId'],(repr(e)))
+        if image_res.state != 'available':
+            logger.error("AWSDriver: Image state is not available for image with id %s; Current state is %s",
+                         image_res.id,image_res.state)
+            raise AttributeError("ImageId is not valid")
+
+        # If MinCount or MaxCount is not passed set them to default of 1
+        if 'MinCount' not in kwargs:
+            kwargs['MinCount'] = 1  
+        if 'MaxCount' not in kwargs:
+            kwargs['MaxCount'] = kwargs['MinCount'] 
+
+        if 'KeyName' not in kwargs:
+            if not self._ssh_key:
+                logger.error("AWSDriver: Key not available during create_instance to allow SSH")
+            else:
+                kwargs['KeyName'] = self._ssh_key
+
+        if 'Placement' not in kwargs and self._availability_zone is not None:
+            placement = {'AvailabilityZone':self._availability_zone}
+            kwargs['Placement'] = placement
+
+        if 'SubnetId' not in kwargs and 'NetworkInterfaces' not in kwargs:
+            if self._default_subnet_id:
+                kwargs['SubnetId'] = self._default_subnet_id
+            else: 
+                logger.error("AWSDriver: Valid subnetid not present during create instance")
+                raise AttributeError("Valid subnet not present during create instance")
+
+        if self._availability_zone and 'SubnetId' in kwargs:
+            subnet = self.get_subnet(SubnetId= kwargs['SubnetId']) 
+            if not subnet:
+                logger.error("AWSDriver: Valid subnet not found for subnetid %s",kwargs['SubnetId'])
+                raise AttributeError("Valid subnet not found for subnetid %s",kwargs['SubnetId'])
+            if subnet.availability_zone != self._availability_zone:
+                logger.error("AWSDriver: AZ of Subnet %s %s doesnt match account AZ %s",kwargs['SubnetId'],
+                                       subnet.availability_zone,self._availability_zone)
+                raise AttributeError("AWSDriver: AZ of Subnet %s %s doesnt match account AZ %s",kwargs['SubnetId'],
+                                       subnet.availability_zone,self._availability_zone)
+
+        # If instance type is not passed; use t2.micro as default
+        if 'InstanceType' not in kwargs or kwargs['InstanceType'] is None:
+               kwargs['InstanceType'] = 't2.micro'
+        inst_type =  kwargs['InstanceType']
+        if inst_type not in aws_table.INSTANCE_TYPES.keys():
+            logger.error("AWSDriver: Invalid instance type %s used",inst_type)
+            raise AttributeError('InstanceType %s is not valid' %inst_type)
+
+        #validate instance_type for AMI 
+        if image_res.sriov_net_support == 'simple':
+            if image_res.virtualization_type != 'hvm':
+                logger.error("AWSDriver: Image with id %s has SRIOV net support but virtualization type is not hvm",kwargs['ImageId'])
+                raise AttributeError('Invalid Image with id %s' %kwargs['ImageId'])
+            if aws_table.INSTANCE_TYPES[inst_type]['sriov'] is False:
+                logger.warning("AWSDriver: Image %s support SR-IOV but instance type %s does not support HVM",kwargs['ImageId'],inst_type)
+
+        if image_res.virtualization_type == 'paravirtual' and aws_table.INSTANCE_TYPES[inst_type]['paravirt'] is False:  # Need to check virt type str for PV
+            logger.error("AWSDriver: Image %s requires PV support but instance %s does not support PV",kwargs['ImageId'],inst_type)
+            raise AttributeError('Image %s requires PV support but instance %s does not support PV',kwargs['ImageId'],inst_type)
+
+        if image_res.root_device_type == 'instance-store' and aws_table.INSTANCE_TYPES[inst_type]['disk'] ==  0: 
+            logger.error("AWSDriver: Image %s uses instance-store root device type that is not supported by instance type %s",kwargs['ImageId'],inst_type) 
+            raise AttributeError("AWSDriver: Image %s uses instance-store root device type that is not supported by instance type %s",kwargs['ImageId'],inst_type)
+
+
+        # Support of instance type varies across regions and also based on account. So we are not validating it
+        #if inst_type not in aws_table.REGION_DETAILS[self._region]['instance_types']:
+        #    logger.error("AWSDriver: instance type %s not supported in region %s",inst_type,self._region)
+        #    raise AttributeError("AWSDriver: instance type %s not supported in region %s",inst_type,self._region)
+
+        try:
+            instances = self._ec2_resource_handle.create_instances(**kwargs)
+        except Exception as e:
+            logger.error("AWSDriver: Creating instance failed with exception: %s" %(repr(e)))
+            raise  
+        return instances
+
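A sketch of create_instance with the keyword arguments documented above; the AMI id, key pair, and subnet id are placeholders, and drv is an AWSDriver instance as constructed earlier.

    instances = drv.create_instance(ImageId='ami-00000000',
                                    InstanceType='t2.micro',
                                    KeyName='my-keypair',
                                    SubnetId='subnet-00000000',
                                    MinCount=1,
                                    MaxCount=1)
    # create_instance returns a list of EC2.Instance objects.
    drv.terminate_instance(instances[0].id)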
+    def terminate_instance(self,InstanceId):
+        """
+        Terminate an EC2 instance
+           Arguments:
+            - InstanceId (String): ID of EC2 instance
+           Returns: None
+        """ 
+
+        InstanceIds = InstanceId
+        if type(InstanceIds) is not list:
+            InstanceIds = list()
+            InstanceIds.append(InstanceId)
+
+        try:
+            response = self._ec2_client_handle.terminate_instances(InstanceIds=InstanceIds)
+        except Exception as e:
+            logger.error("AWSDriver: Terminate instance failed with exception: %s" %(repr(e)))
+            raise  
+        return response 
+
+    def stop_instance(self,InstanceId):
+        """
+        Stop an EC2 instance. Stop is supported only for EBS backed instance
+           Arguments:
+            - InstanceId (String): ID of EC2 instance
+           Returns: None
+        """ 
+
+        InstanceIds = InstanceId
+        if type(InstanceIds) is not list:
+            InstanceIds = list()
+            InstanceIds.append(InstanceId)
+
+        try:
+            response = self._ec2_client_handle.stop_instances(InstanceIds=InstanceIds)
+        except Exception as e:
+            logger.error("AWSDriver: Stop for instance %s failed with exception: %s",InstanceId,repr(e))
+            raise  
+        return response 
+
+    def start_instance(self,InstanceId):
+        """
+        Start an EC2 instance. Start is supported only for EBS backed instance
+           Arguments:
+            - InstanceId (String): ID of EC2 instance
+           Returns: None
+        """ 
+
+        InstanceIds = InstanceId
+        if type(InstanceIds) is not list:
+            InstanceIds = list()
+            InstanceIds.append(InstanceId)
+
+        try:
+            response = self._ec2_client_handle.start_instances(InstanceIds=InstanceIds)
+        except Exception as e:
+            logger.error("AWSDriver: Start for instance %s failed with exception: %s",InstanceId,repr(e))
+            raise  
+        return response 
+       
+    @property
+    def _get_default_subnet_id_for_az(self):
+        """
+        Get default subnet id for AWS Driver registered Availability Zone 
+          Arguments: None
+          Returns: SubnetId (String)
+        """ 
+
+        if self._availability_zone:
+            subnet = self._get_default_subnet_for_az(self._availability_zone)
+            return subnet.id
+        else:
+            return None
+
+    def _get_default_subnet_for_az(self,AvailabilityZone):
+        """
+        Get default Subnet for Availability Zone
+           Arguments:
+              - AvailabilityZone (String) : EC2 AZ
+           Returns: EC2.Subnet object
+        """
+
+        AvailabilityZones = [AvailabilityZone]
+        try:
+            response = list(self._ec2_resource_handle.subnets.filter(
+                                                              Filters = [
+                                                               {'Name':'availability-zone',
+                                                                 'Values': AvailabilityZones}]))
+        except Exception as e:
+            logger.error("AWSDriver: Get default subnet for Availability zone failed with exception: %s" %(repr(e)))
+            raise
+        default_subnet = [subnet for subnet in response if subnet.default_for_az is True and subnet.vpc_id == self._vpcid]
+        assert(len(default_subnet) == 1)
+        return default_subnet[0]
+        
+    def get_subnet_list(self,VpcId=None):
+        """
+        List all the subnets
+          Arguments:
+           - VpcId (String) - VPC ID to filter the subnet list
+        Returns: List of EC2.Subnet Object
+        """
+
+        try:
+            VpcIds = VpcId
+            if VpcId is not None:
+                if type(VpcIds) is not list:
+                    VpcIds = list()
+                    VpcIds.append(VpcId)
+                response = list(self._ec2_resource_handle.subnets.filter(
+                                              Filters = [
+                                              { 'Name': 'vpc-id',
+                                              'Values': VpcIds}]))
+            else:
+                response = list(self._ec2_resource_handle.subnets.all())
+        except Exception as e:
+            logger.error("AWSDriver: List subnets operation failed with exception: %s" %(repr(e)))
+            raise
+        return response 
+
+    def get_subnet(self,SubnetId):
+        """
+        Get the subnet for specified SubnetId
+          Arguments:
+             - SubnetId (String) - MANDATORY
+          Returns: EC2.Subnet Object
+        """
+
+        try:
+            response = list(self._ec2_resource_handle.subnets.filter(SubnetIds=[SubnetId]))
+        except botocore.exceptions.ClientError as e:
+           if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                logger.error("AWSDriver: Get Subnet Invalid SubnetID %s",SubnetId)
+                raise exceptions.RWErrorNotFound("AWSDriver: Delete Subnet Invalid SubnetID %s",SubnetId)
+           else:
+               logger.error("AWSDriver: Creating network interface failed with exception: %s",(repr(e)))
+               raise
+        except Exception as e:
+            logger.error("AWSDriver: Get subnet operation failed with exception: %s" %(repr(e)))
+            raise
+        if len(response) == 0:
+            logger.error("AWSDriver: subnet with id %s is not avaialble" %SubnetId)
+            raise exceptions.RWErrorNotFoun("AWSDriver: subnet with id %s is not avaialble" %SubnetId)
+        elif len(response) > 1: 
+            logger.error("AWSDriver: Duplicate subnet with id %s is avaialble" %SubnetId)
+            raise exceptions.RWErrorDuplicate("AWSDriver: Duplicate subnet with id %s is avaialble" %SubnetId)
+        return response[0] 
+
+    def create_subnet(self,**kwargs):
+        """
+        Create a EC2 subnet based on specified CIDR
+          Arguments:
+             - CidrBlock (String): MANDATORY. CIDR for subnet. CIDR should be within VPC CIDR
+             - VpcId (String): VPC ID to create the subnet. Default AZ from AWS Driver registration used if not present. 
+             - AvailabilityZone (String): Availability zone to create subnet. Default AZ from AWS Driver registration used
+                                          if not present
+          Returns: EC2.Subnet Object 
+        """
+
+        if 'CidrBlock' not in kwargs:
+            logger.error("AWSDriver: Insufficent params for create_subnet. CidrBlock is mandatory parameter")
+            raise AttributeError("AWSDriver: Insufficent params for create_subnet. CidrBlock is mandatory parameter")
+
+        if 'VpcId' not in kwargs:
+            kwargs['VpcId'] = self._vpcid
+        if 'AvailabilityZone' not in kwargs and self._availability_zone is not None:
+            kwargs['AvailabilityZone'] = self._availability_zone
+
+        vpc = self._get_vpc_info(kwargs['VpcId'])
+        if not vpc:
+            logger.error("AWSDriver: Subnet creation failed as VpcId %s does not exist", kwargs['VpcId'])
+            raise exceptions.RWErrorNotFound("AWSDriver: Subnet creation failed as VpcId %s does not exist", kwargs['VpcId'])
+        if vpc.state != 'available':
+            logger.error("AWSDriver: Subnet creation failed as VpcId %s is not in available state. Current state is %s", kwargs['VpcId'],vpc.state)
+            raise exceptions.RWErrorNotConnected("AWSDriver: Subnet creation failed as VpcId %s is not in available state. Current state is %s", kwargs['VpcId'],vpc.state)
+        
+        try:
+            subnet = self._ec2_resource_handle.create_subnet(**kwargs)
+        except botocore.exceptions.ClientError as e:
+           if e.response['Error']['Code'] == 'InvalidSubnet.Conflict':
+                logger.error("AWSDriver: Create Subnet for ip %s failed due to overalp with existing subnet in VPC %s",kwargs['CidrBlock'],kwargs['VpcId'])
+                raise exceptions.RWErrorExists("AWSDriver: Create Subnet for ip %s failed due to overalp with existing subnet in VPC %s",kwargs['CidrBlock'],kwargs['VpcId'])
+           elif e.response['Error']['Code'] == 'InvalidSubnet.Range':
+                logger.error("AWSDriver: Create Subnet for ip %s failed as it is not in VPC CIDR range for VPC %s",kwargs['CidrBlock'],kwargs['VpcId'])
+                raise AttributeError("AWSDriver: Create Subnet for ip %s failed as it is not in VPC CIDR range for VPC %s",kwargs['CidrBlock'],kwargs['VpcId'])
+           else:
+               logger.error("AWSDriver: Creating subnet failed with exception: %s",(repr(e)))
+               raise  
+        except Exception as e:
+            logger.error("AWSDriver: Creating subnet failed with exception: %s" %(repr(e)))
+            raise  
+        return subnet
+
+    def modify_subnet(self,SubnetId,MapPublicIpOnLaunch):
+        """
+        Modify a EC2 subnet
+           Arguments: 
+               - SubnetId (String): MANDATORY, EC2 Subnet ID
+               - MapPublicIpOnLaunch (Boolean): Flag to indicate if subnet is associated with public IP 
+        """
+
+        try:
+            response = self._ec2_client_handle.modify_subnet_attribute(SubnetId=SubnetId,MapPublicIpOnLaunch={'Value':MapPublicIpOnLaunch})
+        except botocore.exceptions.ClientError as e:
+           if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                logger.error("AWSDriver: Modify Subnet Invalid SubnetID %s",SubnetId)
+                raise exceptions.RWErrorNotFound("AWSDriver: Modify Subnet Invalid SubnetID %s",SubnetId)
+           else:
+               logger.error("AWSDriver: Modify subnet failed with exception: %s",(repr(e)))
+               raise  
+        except Exception as e:
+            logger.error("AWSDriver: Modify subnet failed with exception: %s",(repr(e)))
+            raise
+
+
+    def delete_subnet(self,SubnetId):
+        """
+        Delete a EC2 subnet
+           Arguments: 
+               - SubnetId (String): MANDATORY, EC2 Subnet ID
+           Returns: None 
+        """
+
+        try:
+            response = self._ec2_client_handle.delete_subnet(SubnetId=SubnetId)
+        except botocore.exceptions.ClientError as e:
+           if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                logger.error("AWSDriver: Delete Subnet Invalid SubnetID %s",SubnetId)
+                raise exceptions.RWErrorNotFound("AWSDriver: Delete Subnet Invalid SubnetID %s",SubnetId)
+           else:
+               logger.error("AWSDriver: Delete subnet failed with exception: %s",(repr(e)))
+               raise  
+        except Exception as e:
+            logger.error("AWSDriver: Delete subnet failed with exception: %s",(repr(e)))
+            raise
+
+    def get_network_interface_list(self,SubnetId=None,VpcId=None,InstanceId = None):
+        """
+        List all the network interfaces
+           Arguments:
+              - SubnetId (String)
+              - VpcId (String)
+              - InstanceId (String)
+           Returns List of EC2.NetworkInterface  
+        """
+
+        try:
+            if InstanceId is not None:
+                InstanceIds = [InstanceId]
+                response = list(self._ec2_resource_handle.network_interfaces.filter(
+                                              Filters = [
+                                              { 'Name': 'attachment.instance-id',
+                                                 'Values': InstanceIds}]))
+            elif SubnetId is not None:
+                SubnetIds = SubnetId
+                if type(SubnetId) is not list:
+                    SubnetIds = list()
+                    SubnetIds.append(SubnetId)
+                response = list(self._ec2_resource_handle.network_interfaces.filter(
+                                              Filters = [
+                                              { 'Name': 'subnet-id',
+                                              'Values': SubnetIds}]))
+            elif VpcId is not None:
+                VpcIds = VpcId
+                if type(VpcIds) is not list:
+                    VpcIds = list()
+                    VpcIds.append(VpcId)
+                response = list(self._ec2_resource_handle.network_interfaces.filter(
+                                              Filters = [
+                                              { 'Name': 'vpc-id',
+                                              'Values': VpcIds}]))
+            else:
+                response = list(self._ec2_resource_handle.network_interfaces.all())
+        except Exception as e:
+            logger.error("AWSDriver: List network interfaces operation failed with exception: %s" %(repr(e)))
+            raise
+        return response
+
+    def get_network_interface(self,NetworkInterfaceId):
+        """
+        Get the network interface
+          Arguments:
+              NetworkInterfaceId (String): MANDATORY, EC2 Network Interface Id
+          Returns: EC2.NetworkInterface Object
+        """
+
+        try:
+            response = list(self._ec2_resource_handle.network_interfaces.filter(NetworkInterfaceIds=[NetworkInterfaceId]))
+        except Exception as e:
+            logger.error("AWSDriver: List Network Interfaces operation failed with exception: %s" %(repr(e)))
+            raise
+        if len(response) == 0:
+            logger.error("AWSDriver: Network interface with id %s is not avaialble" %NetworkInterfaceId)
+            raise exceptions.RWErrorNotFound("AWSDriver: Network interface with id %s is not avaialble" %NetworkInterfaceId)
+        elif len(response) > 1:
+            logger.error("AWSDriver: Duplicate Network interface with id %s is avaialble" %NetworkInterfaceId)
+            raise exceptions.RWErrorDuplicate("AWSDriver: Duplicate Network interface with id %s is avaialble" %NetworkInterfaceId)
+        return response[0] 
+
+    def create_network_interface(self,**kwargs):
+        """
+        Create a network interface in specified subnet 
+          Arguments:
+             - SubnetId (String): MANDATORY, Subnet to create network interface
+          Returns: EC2.NetworkInterface Object
+        """
+
+        if 'SubnetId' not in kwargs:
+            logger.error("AWSDriver: Insufficent params for create_network_inteface . SubnetId is mandatory parameters")
+            raise AttributeError("AWSDriver: Insufficent params for create_network_inteface . SubnetId is mandatory parameters")
+
+        try:
+            interface = self._ec2_resource_handle.create_network_interface(**kwargs)
+        except botocore.exceptions.ClientError as e:
+            if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                logger.error("AWSDriver: Create network interface failed as subnet %s is not found",kwargs['SubnetId'])
+                raise exceptions.RWErrorNotFound("AWSDriver: Create network interface failed as subnet %s is not found" % kwargs['SubnetId'])
+            else:
+                logger.error("AWSDriver: Creating network interface failed with exception: %s",(repr(e)))
+                raise
+        except Exception as e:
+            logger.error("AWSDriver: Creating network interface failed with exception: %s" %(repr(e)))
+            raise
+        return interface
+
+    def delete_network_interface(self,NetworkInterfaceId):
+        """
+        Delete a network interface
+         Arguments:
+            - NetworkInterfaceId(String): MANDATORY
+         Returns: None
+        """
+        try:
+            response = self._ec2_client_handle.delete_network_interface(NetworkInterfaceId=NetworkInterfaceId)
+        except botocore.exceptions.ClientError as e:
+            if e.response['Error']['Code'] == 'InvalidNetworkInterfaceID.NotFound':
+                logger.error("AWSDriver: Network interface with id %s not found during delete",NetworkInterfaceId)
+                raise exceptions.RWErrorNotFound("AWSDriver: Network interface with id %s not found during delete" % NetworkInterfaceId)
+            else:
+                logger.error("AWSDriver: Delete network interface failed with exception: %s",(repr(e)))
+                raise
+        except Exception as e:
+            logger.error("AWSDriver: Delete network interface failed with exception: %s",(repr(e)))
+            raise
+
+    def associate_public_ip_to_network_interface(self,NetworkInterfaceId):
+        """
+        Allocate an Elastic IP and associate it with the network interface
+          Arguments:
+            NetworkInterfaceId (String): MANDATORY
+          Returns: None
+        """
+        try:
+            response = self._ec2_client_handle.allocate_address(Domain='vpc')
+            self._ec2_client_handle.associate_address(NetworkInterfaceId=NetworkInterfaceId,AllocationId = response['AllocationId'])
+        except Exception as e:
+            logger.error("AWSDriver: Associating Public IP to network interface %s failed with exception: %s",NetworkInterfaceId,(repr(e)))
+            raise
+        return response
+
+    def disassociate_public_ip_from_network_interface(self,NetworkInterfaceId):
+        """
+        Disassociate an Elastic IP from the network interface and release it
+          Arguments:
+            NetworkInterfaceId (String): MANDATORY
+          Returns: None
+        """
+        try:
+            interface = self.get_network_interface(NetworkInterfaceId=NetworkInterfaceId) 
+            if interface  and interface.association and 'AssociationId' in interface.association:
+                self._ec2_client_handle.disassociate_address(AssociationId = interface.association['AssociationId'])
+                self._ec2_client_handle.release_address(AllocationId=interface.association['AllocationId'])
+        except Exception as e:
+            logger.error("AWSDriver: Disassociating Public IP from network interface %s failed with exception: %s",NetworkInterfaceId,(repr(e)))
+            raise
+
+    def attach_network_interface(self,**kwargs):
+        """
+        Attach a network interface to a running EC2 instance. Used to add additional interfaces to an instance
+          Arguments:
+            - NetworkInterfaceId (String):  MANDATORY,
+            - InstanceId(String) :  MANDATORY
+            - DeviceIndex (Integer): MANDATORY
+          Returns: Dict with AttachmentId which is string
+        """
+
+        if 'NetworkInterfaceId' not in kwargs or 'InstanceId' not in kwargs or 'DeviceIndex' not in kwargs:
+            logger.error('AWSDriver: Attach network interface to instance requires NetworkInterfaceId, InstanceId and DeviceIndex as mandatory parameters')
+            raise AttributeError('AWSDriver: Attach network interface to instance requires NetworkInterfaceId, InstanceId and DeviceIndex as mandatory parameters')
+
+        try:
+            response = self._ec2_client_handle.attach_network_interface(**kwargs)
+        except Exception as e:
+            logger.error("AWSDriver: Attach network interface failed with exception: %s",(repr(e)))
+            raise
+        return response
+
+    def detach_network_interface(self,**kwargs):
+        """
+        Detach network interface from instance 
+          Arguments:
+            - AttachmentId (String)
+          Returns: None 
+        """
+
+        if 'AttachmentId' not in kwargs:
+            logger.error('AWSDriver: Detach network interface from instance requires AttachmentId as a mandatory parameter')
+            raise AttributeError('AWSDriver: Detach network interface from instance requires AttachmentId as a mandatory parameter')
+
+        try:
+            response = self._ec2_client_handle.detach_network_interface(**kwargs)
+        except Exception as e:
+            logger.error("AWSDriver: Detach network interface failed with exception: %s",(repr(e)))
+            raise
+
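+    # Typical lifecycle of the network-interface helpers above (illustrative sketch
+    # only; assumes 'drv' is a configured AWSDriver instance and the ids are placeholders):
+    #
+    #   port = drv.create_network_interface(SubnetId='subnet-xxxxxxxx')
+    #   resp = drv.attach_network_interface(NetworkInterfaceId=port.id,
+    #                                       InstanceId='i-xxxxxxxx',
+    #                                       DeviceIndex=1)
+    #   drv.detach_network_interface(AttachmentId=resp['AttachmentId'])
+    #   drv.delete_network_interface(port.id)
+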
+    def map_flavor_to_instance_type(self,name,ram,vcpus,disk,inst_types = None):
+        """
+        Method to find an EC2 instance type matching the requested params
+          Arguments:
+             - name (String) : Name for flavor
+             - ram (Integer) : RAM size in MB
+             - vcpus (Integer): VCPU count
+             - disk (Integer): Storage size in GB
+             - inst_types (List): List of string having list of EC2 instance types to choose from
+                                  assumed to be in order of resource size 
+          Returns
+             InstanceType (String) - EC2 Instance Type
+        """
+        if inst_types is None:
+            inst_types = ['c3.large','c3.xlarge','c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge']
+        
+        for inst in inst_types:
+            if inst in aws_table.INSTANCE_TYPES:
+                if ( aws_table.INSTANCE_TYPES[inst]['ram'] >= ram and
+                     aws_table.INSTANCE_TYPES[inst]['vcpu'] >= vcpus and
+                     aws_table.INSTANCE_TYPES[inst]['disk'] >= disk):
+                    return inst
+        return 't2.micro'  
+
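+    # Example mapping (illustrative only): a flavor asking for 4 vcpus, 8192 MB RAM and
+    # 40 GB disk selects the first entry in the default list whose resources are at
+    # least that large:
+    #
+    #   inst_type = drv.map_flavor_to_instance_type('my-flavor', ram=8192, vcpus=4, disk=40)
+    #   # -> 'c3.2xlarge' with the default inst_types list
+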
+    def upload_ssh_key(self,key_name,public_key):
+        """
+        Method to upload Public Key to AWS
+          Arguments:
+            - key_name (String): Name for the key pair
+            - public_key (String): Base 64 encoded public key
+          Returns  None
+        """
+        self._ec2_resource_handle.import_key_pair(KeyName=key_name,PublicKeyMaterial=public_key) 
+
+    def delete_ssh_key(self,key_name):
+        """
+        Method to delete Public Key from AWS
+          Arguments:
+            - key_name (String): Name for the key pair
+          Returns  None
+        """
+        self._ec2_client_handle.delete_key_pair(KeyName=key_name) 
+             
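+    # The key-pair helpers are thin wrappers over the boto3 EC2 handles (illustrative
+    # usage, assuming 'pub_key' holds the base64 encoded public key material):
+    #
+    #   drv.upload_ssh_key('my-keypair', pub_key)
+    #   # ...launch instances referencing KeyName='my-keypair'...
+    #   drv.delete_ssh_key('my-keypair')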
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_table.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_table.py
new file mode 100644 (file)
index 0000000..c28c452
--- /dev/null
@@ -0,0 +1,451 @@
+#!/usr/bin/python
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+
+"""
+Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them.
+From http://aws.amazon.com/ec2/instance-types/
+max_inst From http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2 
+paravirt from https://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
+"""
+INSTANCE_TYPES = {
+    'm4.large': {
+        'id': 'm4.large',
+        'name': 'Large Instance',
+        'ram': 8*1024,
+        'vcpu': 2,
+        'disk': 0,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'm4.xlarge': {
+        'id': 'm4.xlarge',
+        'name': 'Extra Large Instance',
+        'ram': 16*1024,
+        'vcpu': 4,
+        'disk': 0,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'm4.2xlarge': {
+        'id': 'm4.2xlarge',
+        'name': 'Double Extra Large Instance',
+        'ram': 32*1024,
+        'vcpu': 8,
+        'disk': 0,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'm4.4xlarge': {
+        'id': 'm4.4xlarge',
+        'name': 'Quadruple Extra Large Instance',
+        'ram': 64*1024,
+        'vcpu': 16,
+        'disk': 0,
+        'bandwidth': None,
+        'max_inst': 10,
+        'sriov': True,
+        'paravirt': False
+    },
+    'm4.10xlarge': {
+        'id': 'm4.10xlarge',
+        'name': 'Ten Extra Large Instance',
+        'ram': 160*1024,
+        'vcpu': 40,
+        'disk': 0,
+        'bandwidth': None,
+        'max_inst': 5,
+        'sriov': True,
+        'paravirt': False
+    },
+    'm3.medium': {
+        'id': 'm3.medium',
+        'name': 'Medium Instance',
+        'ram': 3.75*1024, #3840
+        'vcpu': 1,
+        'disk': 4,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': True
+    },
+    'm3.large': {
+        'id': 'm3.large',
+        'name': 'Large Instance',
+        'ram': 7.5*1024, #7168
+        'vcpu': 2,
+        'disk': 32,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': True
+    },
+    'm3.xlarge': {
+        'id': 'm3.xlarge',
+        'name': 'Extra Large Instance',
+        'ram': 15*1024,#15360
+        'vcpu': 4,
+        'disk': 80,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': True
+    },
+    'm3.2xlarge': {
+        'id': 'm3.2xlarge',
+        'name': 'Double Extra Large Instance',
+        'ram': 30*1024, #30720
+        'vcpu': 8,
+        'disk': 160,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': True
+    },
+    'g2.2xlarge': {
+        'id': 'g2.2xlarge',
+        'name': 'Cluster GPU G2 Double Extra Large Instance',
+        'ram': 15000,
+        'disk': 60,
+        'vcpu': 8,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': False
+    },
+    'g2.8xlarge': {
+        'id': 'g2.8xlarge',
+        'name': 'Cluster GPU G2 Eight Extra Large Instance',
+        'ram': 60000,
+        'disk': 240,
+        'vcpu': 32,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': False
+    },
+    # c4 instances are EBS-only (no local instance storage)
+    'c4.large': {
+        'id': 'c4.large',
+        'name': 'Compute Optimized Large Instance',
+        'ram': 3750,
+        'vcpu': 2,
+        'disk': 0,  #EBS-only
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'c4.xlarge': {
+        'id': 'c4.xlarge',
+        'name': 'Compute Optimized Extra Large Instance',
+        'ram': 7500,
+        'vcpu': 4,
+        'disk': 0,  #EBS-only
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'c4.2xlarge': {
+        'id': 'c4.2xlarge',
+        'name': 'Compute Optimized Double Extra Large Instance',
+        'ram': 15000,
+        'vcpu': 8,
+        'disk': 0,  #EBS-only
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'c4.4xlarge': {
+        'id': 'c4.4xlarge',
+        'name': 'Compute Optimized Quadruple Extra Large Instance',
+        'ram': 30000,
+        'vcpu': 16,
+        'disk': 0,  #EBS-only
+        'bandwidth': None,
+        'max_inst': 10,
+        'sriov': True,
+        'paravirt': False
+    },
+    'c4.8xlarge': {
+        'id': 'c4.8xlarge',
+        'name': 'Compute Optimized Eight Extra Large Instance',
+        'ram': 60000,
+        'vcpu': 36,
+        'disk': 0,  #EBS-only
+        'bandwidth': None,
+        'max_inst': 5,
+        'sriov': True,
+        'paravirt': False
+    },
+    # c3 instances have 2 SSDs of the specified disk size
+    'c3.large': {
+        'id': 'c3.large',
+        'name': 'Compute Optimized Large Instance',
+        'ram': 3750,
+        'vcpu': 2,
+        'disk': 32,  # x2
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': True
+    },
+    'c3.xlarge': {
+        'id': 'c3.xlarge',
+        'name': 'Compute Optimized Extra Large Instance',
+        'ram': 7500,
+        'vcpu':4,
+        'disk': 80,  # x2
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': True
+    },
+    'c3.2xlarge': {
+        'id': 'c3.2xlarge',
+        'name': 'Compute Optimized Double Extra Large Instance',
+        'ram': 15000,
+        'vcpu':8,
+        'disk': 160,  # x2
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': True
+    },
+    'c3.4xlarge': {
+        'id': 'c3.4xlarge',
+        'name': 'Compute Optimized Quadruple Extra Large Instance',
+        'ram': 30000,
+        'vcpu':16,
+        'disk': 320,  # x2
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': True
+    },
+    'c3.8xlarge': {
+        'id': 'c3.8xlarge',
+        'name': 'Compute Optimized Eight Extra Large Instance',
+        'ram': 60000,
+        'vcpu':32,
+        'disk': 640,  # x2
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': True
+    },
+    # i2 instances have up to eight SSD drives
+    'i2.xlarge': {
+        'id': 'i2.xlarge',
+        'name': 'High Storage Optimized Extra Large Instance',
+        'ram': 31232,
+        'vcpu': 4,
+        'disk': 800,
+        'bandwidth': None,
+        'max_inst': 8,
+        'sriov': True,
+        'paravirt': False
+    },
+    'i2.2xlarge': {
+        'id': 'i2.2xlarge',
+        'name': 'High Storage Optimized Double Extra Large Instance',
+        'ram': 62464,
+        'vcpu': 8,
+        'disk': 1600,
+        'bandwidth': None,
+        'max_inst': 8,
+        'sriov': True,
+        'paravirt': False
+    },
+    'i2.4xlarge': {
+        'id': 'i2.4xlarge',
+        'name': 'High Storage Optimized Quadruple Extra Large Instance',
+        'ram': 124928,
+        'vcpu': 16,
+        'disk': 3200,
+        'bandwidth': None,
+        'max_inst': 4,
+        'sriov': True,
+        'paravirt': False
+    },
+    'i2.8xlarge': {
+        'id': 'i2.8xlarge',
+        'name': 'High Storage Optimized Eight Extra Large Instance',
+        'ram': 249856,
+        'vcpu': 32,
+        'disk': 6400,
+        'bandwidth': None,
+        'max_inst': 2,
+        'sriov': True,
+        'paravirt': False
+    },
+    'd2.xlarge': {
+        'id': 'd2.xlarge',
+        'name': 'High Storage Optimized Extra Large Instance',
+        'ram': 30050,
+        'vcpu': 4,
+        'disk': 6000,  # 3 x 2 TB
+        'max_inst': 20,
+        'bandwidth': None,
+        'sriov': True,
+        'paravirt': False
+    },
+    'd2.2xlarge': {
+        'id': 'd2.2xlarge',
+        'name': 'High Storage Optimized Double Extra Large Instance',
+        'ram': 61952,
+        'vcpu': 8,
+        'disk': 12000,  # 6 x 2 TB
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'd2.4xlarge': {
+        'id': 'd2.4xlarge',
+        'name': 'High Storage Optimized Quadruple Extra Large Instance',
+        'ram': 122000,
+        'vcpu': 16,
+        'disk': 24000,  # 12 x 2 TB
+        'bandwidth': None,
+        'max_inst': 10,
+        'sriov': True,
+        'paravirt': False
+    },
+    'd2.8xlarge': {
+        'id': 'd2.8xlarge',
+        'name': 'High Storage Optimized Eight Extra Large Instance',
+        'ram': 244000,
+        'vcpu': 36,
+        'disk': 48000,  # 24 x 2 TB
+        'bandwidth': None,
+        'max_inst': 5,
+        'sriov': True,
+        'paravirt': False
+    },
+    # 1x SSD
+    'r3.large': {
+        'id': 'r3.large',
+        'name': 'Memory Optimized Large instance',
+        'ram': 15000,
+        'vcpu': 2,
+        'disk': 32,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'r3.xlarge': {
+        'id': 'r3.xlarge',
+        'name': 'Memory Optimized Extra Large instance',
+        'ram': 30500,
+        'vcpu': 4,
+        'disk': 80,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'r3.2xlarge': {
+        'id': 'r3.2xlarge',
+        'name': 'Memory Optimized Double Extra Large instance',
+        'ram': 61000,
+        'vcpu': 8,
+        'disk': 160,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'r3.4xlarge': {
+        'id': 'r3.4xlarge',
+        'name': 'Memory Optimized Quadruple Extra Large instance',
+        'ram': 122000,
+        'vcpu': 16,
+        'disk': 320,
+        'bandwidth': None,
+        'max_inst': 10,
+        'sriov': True,
+        'paravirt': False
+    },
+    'r3.8xlarge': {
+        'id': 'r3.8xlarge',
+        'name': 'Memory Optimized Eight Extra Large instance',
+        'ram': 244000,
+        'vcpu': 32,
+        'disk': 320,  # x2
+        'bandwidth': None,
+        'max_inst': 5,
+        'sriov': True,
+        'paravirt': False
+    },
+    't2.micro': {
+        'id': 't2.micro',
+        'name': 'Burstable Performance Micro Instance',
+        'ram': 1024,
+        'disk': 0,  # EBS Only
+        'vcpu': 1,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': False,
+        'extra': {
+            'cpu': 6
+        }
+    },
+    # Burstable Performance General Purpose
+    't2.small': {
+        'id': 't2.small',
+        'name': 'Burstable Performance Small Instance',
+        'ram': 2048,
+        'vcpu': 1,
+        'disk': 0,  # EBS Only
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': False,
+        'extra': {
+            'cpu': 12
+        }
+    },
+    't2.medium': {
+        'id': 't2.medium',
+        'name': 'Burstable Performance Medium Instance',
+        'ram': 4096,
+        'disk': 0,  # EBS Only
+        'vcpu': 2,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': False,
+        'extra': {
+            'cpu': 24
+        }
+    },
+    't2.large': {
+        'id': 't2.large',
+        'name': 'Burstable Performance Large Instance',
+        'ram': 8192,
+        'disk': 0,  # EBS Only
+        'vcpu': 2,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': False,
+        'extra': {
+            'cpu': 36
+        }
+    }
+}
+
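+# Illustrative lookup (the INSTANCE_TYPES keys above are the only data this module
+# exports):
+#
+#   from rift.rwcal.aws import aws_table
+#   spec = aws_table.INSTANCE_TYPES['c3.xlarge']
+#   print(spec['ram'], spec['vcpu'], spec['disk'])   # 7500 4 80
+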
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/delete_vm.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/delete_vm.py
new file mode 100644 (file)
index 0000000..d1f3f92
--- /dev/null
@@ -0,0 +1,112 @@
+#!/usr/bin/env python3
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import rift.rwcal.aws as aws_drv
+import logging
+import argparse
+import sys, os, time
+
+#logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger('rwcal.aws.delete_vm')
+
+        
+def cleanup_vm(drv,argument):
+    vm_inst = drv.get_instance(argument.server_id)
+    logger.info("Waiting for VM instance to reach terminated state")
+    vm_inst.wait_until_terminated()
+    logger.info("VM instance is now in terminated state")
+
+    for port_id in argument.vdu_port_list:
+        logger.info("Deleting network interface with id %s",port_id)
+        port = drv.get_network_interface(port_id)
+        if port:
+            if port.association and 'AssociationId' in port.association:
+                drv.disassociate_public_ip_from_network_interface(NetworkInterfaceId=port.id)
+            drv.delete_network_interface(port.id)
+        else:
+            logger.error("Network interface with id %s not found when deleting interface",port_id)
+    
+
+def main():
+    """
+    Main routine
+    """
+    parser = argparse.ArgumentParser(description='Script to delete AWS resources')
+    parser.add_argument('--aws_key',
+                        action = "store",
+                        dest = "aws_key",
+                        type = str,
+                        help='AWS Key')
+
+    parser.add_argument('--aws_secret',
+                        action = "store",
+                        dest = "aws_secret",
+                        type = str,
+                        help = "AWS Secret")
+
+    parser.add_argument('--aws_region',
+                        action = "store",
+                        dest = "aws_region",
+                        type = str,
+                        help = "AWS Region")
+
+    parser.add_argument('--server_id',
+                        action = "store",
+                        dest = "server_id",
+                        type = str,
+                        help = "Server ID on which delete operations need to be performed")
+    
+    parser.add_argument('--vdu_port_list',
+                        action = "append",
+                        dest = "vdu_port_list",
+                        default = [],
+                        help = "Port id list for vdu")
+
+    argument = parser.parse_args()
+
+    if not argument.aws_key:
+        logger.error("ERROR: AWS key is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS key: %s" %(argument.aws_key))
+
+    if not argument.aws_secret:
+        logger.error("ERROR: AWS Secret is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS Secret: %s" %(argument.aws_secret))
+
+    if not argument.aws_region:
+        logger.error("ERROR: AWS Region is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS Region: %s" %(argument.aws_region))
+
+    if not argument.server_id:
+        logger.error("ERROR: Server ID is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using Server ID : %s" %(argument.server_id))
+        
+    try:
+        pid = os.fork()
+        if pid > 0:
+            # exit for parent
+            sys.exit(0)
+    except OSError as e:
+        logger.error("fork failed: %d (%s)\n" % (e.errno, e.strerror))
+        sys.exit(2)
+        
+    drv = aws_drv.AWSDriver(key = argument.aws_key,
+                            secret  = argument.aws_secret,
+                            region  = argument.aws_region)
+    cleanup_vm(drv, argument)
+    sys.exit(0)
+    
+if __name__ == "__main__":
+    main()
+        
+
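+# Illustrative invocation (normally built and launched by the rwcal AWS plugin via
+# DELETE_VM_CMD; the ids below are placeholders):
+#
+#   delete_vm.py --aws_key <key> --aws_secret <secret> --aws_region us-east-1 \
+#                --server_id i-xxxxxxxx --vdu_port_list eni-aaaaaaaa --vdu_port_list eni-bbbbbbbb
+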
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/exceptions.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/exceptions.py
new file mode 100644 (file)
index 0000000..7b426db
--- /dev/null
@@ -0,0 +1,42 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+#
+# Rift Exceptions:
+#   These exceptions each correspond with a rift status as they are defined
+# in rwtypes.vala.  Adding them here so that errors from C transitioning
+# back to python can be handled in a pythonic manner rather than having to
+# inspect return values.
+
+class RWErrorFailure(Exception):
+  pass
+
+class RWErrorDuplicate(Exception):
+  pass
+
+class RWErrorNotFound(Exception):
+  pass
+
+class RWErrorOutOfBounds(Exception):
+  pass
+
+class RWErrorBackpressure(Exception):
+  pass
+
+class RWErrorTimeout(Exception):
+  pass
+
+class RWErrorExists(Exception):
+  pass
+
+class RWErrorNotEmpty(Exception):
+  pass
+
+class RWErrorNotConnected(Exception):
+  pass
+
+class RWErrorNotSupported(Exception):
+  pass
+
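+# Illustrative use (a sketch; 'drv' would be an AWSDriver instance from aws_drv.py):
+#
+#   try:
+#       port = drv.get_network_interface(port_id)
+#   except RWErrorNotFound:
+#       pass  # handle the missing interface rather than inspecting a return value
+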
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/prepare_vm.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/prepare_vm.py
new file mode 100644 (file)
index 0000000..e4154ce
--- /dev/null
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import rift.rwcal.aws as aws_drv
+import logging
+import argparse
+import sys, os, time
+
+#logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger('rwcal.aws.prepare_vm')
+
+        
+def prepare_vm_after_boot(drv,argument):
+    vm_inst = drv.get_instance(argument.server_id)
+    logger.info("Waiting for VM instance to get to running state")
+    vm_inst.wait_until_running()
+    logger.info("VM inst is now in running state") 
+    if argument.vdu_name:
+        vm_inst.create_tags(Tags=[{'Key': 'Name','Value':argument.vdu_name}])
+    if argument.vdu_node_id is not None:
+        vm_inst.create_tags(Tags=[{'Key':'node_id','Value':argument.vdu_node_id}])    
+    
+    for index,port_id in enumerate(argument.vdu_port_list):
+        logger.info("Attaching network interface with id %s to VDU instance %s",port_id,vm_inst.id)
+        drv.attach_network_interface(NetworkInterfaceId = port_id,InstanceId = vm_inst.id,DeviceIndex=index+1)
+    
+
+def main():
+    """
+    Main routine
+    """
+    parser = argparse.ArgumentParser(description='Script to prepare an AWS VM after boot')
+    parser.add_argument('--aws_key',
+                        action = "store",
+                        dest = "aws_key",
+                        type = str,
+                        help='AWS Key')
+
+    parser.add_argument('--aws_secret',
+                        action = "store",
+                        dest = "aws_secret",
+                        type = str,
+                        help = "AWS Secret")
+
+    parser.add_argument('--aws_region',
+                        action = "store",
+                        dest = "aws_region",
+                        type = str,
+                        help = "AWS Region")
+
+    parser.add_argument('--server_id',
+                        action = "store",
+                        dest = "server_id",
+                        type = str,
+                        help = "Server ID on which boot operations need to be performed")
+    
+    parser.add_argument('--vdu_name',
+                        action = "store",
+                        dest = "vdu_name",
+                        type = str,
+                        help = "VDU name")
+
+    parser.add_argument('--vdu_node_id',
+                        action = "store",
+                        dest = "vdu_node_id",
+                        help = "Node id for vdu")
+
+    parser.add_argument('--vdu_port_list',
+                        action = "append",
+                        dest = "vdu_port_list",
+                        default = [],
+                        help = "Port id list for vdu")
+
+    argument = parser.parse_args()
+
+    if not argument.aws_key:
+        logger.error("ERROR: AWS key is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS key: %s" %(argument.aws_key))
+
+    if not argument.aws_secret:
+        logger.error("ERROR: AWS Secret is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS Secret: %s" %(argument.aws_secret))
+
+    if not argument.aws_region:
+        logger.error("ERROR: AWS Region is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS Region: %s" %(argument.aws_region))
+
+    if not argument.server_id:
+        logger.error("ERROR: Server ID is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using Server ID : %s" %(argument.server_id))
+        
+    try:
+        pid = os.fork()
+        if pid > 0:
+            # exit for parent
+            sys.exit(0)
+    except OSError as e:
+        logger.error("fork failed: %d (%s)\n" % (e.errno, e.strerror))
+        sys.exit(2)
+        
+    drv = aws_drv.AWSDriver(key = argument.aws_key,
+                            secret  = argument.aws_secret,
+                            region  = argument.aws_region)
+    prepare_vm_after_boot(drv, argument)
+    sys.exit(0)
+    
+if __name__ == "__main__":
+    main()
+        
+
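+# Illustrative invocation (normally built and launched by the rwcal AWS plugin via
+# PREPARE_VM_CMD; the ids below are placeholders):
+#
+#   prepare_vm.py --aws_key <key> --aws_secret <secret> --aws_region us-east-1 \
+#                 --server_id i-xxxxxxxx --vdu_name my-vdu --vdu_port_list eni-aaaaaaaa
+#
+# The primary interface created at launch stays at DeviceIndex 0, so ports passed via
+# --vdu_port_list are attached starting at DeviceIndex 1 (the index+1 above).
+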
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rwcal_aws.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_aws/rwcal_aws.py
new file mode 100644 (file)
index 0000000..b61294c
--- /dev/null
@@ -0,0 +1,1083 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import time
+import os
+import subprocess
+import logging
+import rift.rwcal.aws as aws_drv
+import rw_status
+import rwlogger
+import rift.rwcal.aws.exceptions as exceptions
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+logger = logging.getLogger('rwcal.aws')
+logger.setLevel(logging.DEBUG)
+
+PREPARE_VM_CMD = "prepare_vm.py --aws_key {key} --aws_secret {secret} --aws_region {region} --server_id {server_id}"
+DELETE_VM_CMD =  "delete_vm.py --aws_key {key} --aws_secret {secret} --aws_region {region} --server_id {server_id}"
+
+rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND,
+                                             KeyError: RwTypes.RwStatus.NOTFOUND,
+                                             NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,
+                                             AttributeError: RwTypes.RwStatus.FAILURE,
+                                             exceptions.RWErrorNotFound: RwTypes.RwStatus.NOTFOUND,
+                                             exceptions.RWErrorDuplicate: RwTypes.RwStatus.DUPLICATE,
+                                             exceptions.RWErrorExists: RwTypes.RwStatus.EXISTS,
+                                             exceptions.RWErrorNotConnected: RwTypes.RwStatus.NOTCONNECTED,})
+
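+# With this mapping, a CAL method decorated with @rwstatus that raises one of the
+# mapped exceptions (e.g. exceptions.RWErrorNotFound) is reported to the caller as the
+# corresponding RwTypes.RwStatus code instead of propagating the Python exception,
+# with ret_on_failure supplying the value returned on failure. Illustrative sketch:
+#
+#   @rwstatus(ret_on_failure=[None])
+#   def do_get_port(self, account, port_id):
+#       port = self._get_driver(account).get_network_interface(port_id)  # may raise RWErrorNotFound
+#       return RwcalAWSPlugin._fill_port_info(port)
+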
+class RwcalAWSPlugin(GObject.Object, RwCal.Cloud):
+    """This class implements the CAL VALA methods for AWS."""
+     
+    flavor_id = 1
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._driver_class = aws_drv.AWSDriver
+        self._flavor_list = []
+
+
+    def _get_driver(self, account):
+        return self._driver_class(key     = account.aws.key,
+                                  secret  = account.aws.secret,
+                                  region  = account.aws.region,
+                                  ssh_key = account.aws.ssh_key,
+                                  vpcid   = account.aws.vpcid,
+                                  availability_zone = account.aws.availability_zone,
+                                  default_subnet_id = account.aws.default_subnet_id)
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(rwlogger.RwLogger(category="rwcal-aws",
+                                                log_hdl=rwlog_ctx,))
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        Performs an access to the resources using underlying API. If creds
+        are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus(
+                status="success",
+                details="AWS Cloud Account validation not implemented yet"
+                )
+
+        return status
+        
+    @rwstatus(ret_on_failure=[""])
+    def do_get_management_network(self, account):
+        """
+        Returns the management network associated with the specified account.
+        Arguments:
+            account - a cloud account
+
+        Returns: 
+            The management network
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_tenant(self, account, name):
+        """Create a new tenant.
+
+        Arguments:
+            account - a cloud account
+            name - name of the tenant
+
+        Returns:
+            The tenant id
+        """
+        raise NotImplementedError
+    
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """delete a tenant.
+
+        Arguments:
+            account - a cloud account
+            tenant_id - id of the tenant
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """List tenants.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of tenants
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_role(self, account, name):
+        """Create a new role.
+
+        Arguments:
+            account - a cloud account
+            name - name of the role
+
+        Returns:
+            The role id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """Delete a role.
+
+        Arguments:
+            account - a cloud account
+            role_id - id of the role
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """List roles.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of roles
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_image(self, account, image):
+        """Create an image
+
+        Arguments:
+            account - a cloud account
+            image - a description of the image to create
+
+        Returns:
+            The image id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """Delete a vm image.
+
+        Arguments:
+            account - a cloud account
+            image_id - id of the image to delete
+        """
+        raise NotImplementedError
+    
+    @staticmethod
+    def _fill_image_info(img_info):
+        """Create a GI object from image info dictionary
+
+        Converts image information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            img_info - image information dictionary object from AWS
+
+        Returns:
+            The ImageInfoItem
+        """
+        img = RwcalYang.ImageInfoItem()
+        img.name = img_info.name
+        img.id   = img_info.id
+
+        tag_fields = ['checksum']
+        # Copy recognized tag properties (e.g. checksum) onto the image object
+        if img_info.tags:
+            for tag in img_info.tags:
+                if tag['Key'] in tag_fields:
+                    setattr(img, tag['Key'], tag['Value'])
+        img.disk_format  = 'ami'
+        if img_info.state == 'available':
+            img.state = 'active'
+        else:
+            img.state = 'inactive'
+        return img
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """Return a list of the names of all available images.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            The list of images in VimResources object
+        """
+        response = RwcalYang.VimResources()
+        image_list = []
+        images = self._get_driver(account).list_images()
+        for img in images:
+            response.imageinfo_list.append(RwcalAWSPlugin._fill_image_info(img))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        """Return a image information.
+
+        Arguments:
+            account - a cloud account
+            image_id - an id of the image
+
+        Returns:
+            ImageInfoItem object containing image information.
+        """
+        image = self._get_driver(account).get_image(image_id)
+        return RwcalAWSPlugin._fill_image_info(image)
+    
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vm(self, account, vminfo):
+        """Create a new virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vminfo - information that defines the type of VM to create
+
+        Returns:
+            The vm id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """Start an existing virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        raise NotImplementedError
+        
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """Stop a running virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """Delete a virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """Reboot a virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def _fill_vm_info(vm_info):
+        """Create a GI object from vm info dictionary
+
+        Converts VM information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            vm_info - VM information from AWS
+
+        Returns:
+            Protobuf Gi object for VM
+        """
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_id     = vm_info.id
+        vm.image_id  = vm_info.image_id
+        vm.flavor_id = vm_info.instance_type
+        if vm_info.state['Name'] == 'running':
+            vm.state = 'active'
+        else:
+            vm.state = 'inactive'
+        for network_intf in vm_info.network_interfaces:
+            if 'Attachment' in network_intf and network_intf['Attachment']['DeviceIndex'] == 0:
+                if 'Association' in network_intf and 'PublicIp' in network_intf['Association']:
+                    vm.public_ip = network_intf['Association']['PublicIp']
+                vm.management_ip = network_intf['PrivateIpAddress']
+            else:
+                addr = vm.private_ip_list.add()
+                addr.ip_address = network_intf['PrivateIpAddress']
+                if 'Association' in network_intf and 'PublicIp' in network_intf['Association']:
+                    addr = vm.public_ip_list.add()
+                    addr.ip_address = network_intf['Association']['PublicIp']
+
+        if vm_info.placement and 'AvailabilityZone' in vm_info.placement:
+            vm.availability_zone = vm_info.placement['AvailabilityZone']
+        if vm_info.tags:
+            for tag in vm_info.tags:
+                if tag['Key'] == 'Name':
+                    vm.vm_name   = tag['Value']
+                elif tag['Key'] in vm.user_tags.fields:
+                    setattr(vm.user_tags,tag['Key'],tag['Value'])
+        return vm
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        """Return a list of the VMs as vala boxed objects
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List containing VM information
+        """
+        response = RwcalYang.VimResources()
+        vms = self._get_driver(account).list_instances()
+        for vm in vms:
+            response.vminfo_list.append(RwcalAWSPlugin._fill_vm_info(vm))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vm(self, account, id):
+        """Return vm information.
+
+        Arguments:
+            account - a cloud account
+            id - an id for the VM
+
+        Returns:
+            VM information
+        """
+        vm = self._get_driver(account).get_instance(id)
+        return RwcalAWSPlugin._fill_vm_info(vm)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_flavor(self, account, flavor):
+        """Create new flavor.
+           AWS has a fixed set of instance types, so we map the flavor to an existing
+           instance type and create a local flavor for it.
+
+        Arguments:
+            account - a cloud account
+            flavor - flavor of the VM
+
+        Returns:
+            flavor id (with EC2 instance type included in id)
+        """
+        drv = self._get_driver(account)
+        inst_type = drv.map_flavor_to_instance_type(name  = flavor.name,
+                                                    ram   = flavor.vm_flavor.memory_mb,
+                                                    vcpus = flavor.vm_flavor.vcpu_count,
+                                                    disk  = flavor.vm_flavor.storage_gb)
+        
+        new_flavor = RwcalYang.FlavorInfoItem()
+        new_flavor.name = flavor.name
+        new_flavor.vm_flavor.memory_mb = flavor.vm_flavor.memory_mb 
+        new_flavor.vm_flavor.vcpu_count = flavor.vm_flavor.vcpu_count 
+        new_flavor.vm_flavor.storage_gb = flavor.vm_flavor.storage_gb 
+        new_flavor.id = inst_type + '-' + str(RwcalAWSPlugin.flavor_id)
+        RwcalAWSPlugin.flavor_id = RwcalAWSPlugin.flavor_id+1
+        self._flavor_list.append(new_flavor)
+        return new_flavor.id 
+
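+    # Flavor ids produced here embed the chosen EC2 instance type, e.g. 'c3.xlarge-1',
+    # 'c3.2xlarge-2', ... (illustrative). do_create_vdu later recovers the instance
+    # type with flavor_id.split('-')[0].
+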
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """Delete flavor.
+
+        Arguments:
+            account - a cloud account
+            flavor_id - id flavor of the VM
+        """
+
+        flavor = [flav for flav in self._flavor_list if flav.id == flavor_id]
+        self._flavor_list.remove(flavor[0])
+
+    @staticmethod
+    def _fill_flavor_info(flavor_info):
+        """Create a GI object from flavor info dictionary
+
+        Converts flavor information stored by the plugin into a
+        Protobuf Gi Object
+
+        Arguments:
+            flavor_info: Flavor information from the local flavor list
+
+        Returns:
+             Object of class FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name                       = flavor_info.name
+        flavor.id                         = flavor_info.id
+        flavor.vm_flavor.memory_mb = flavor_info.vm_flavor.memory_mb 
+        flavor.vm_flavor.vcpu_count = flavor_info.vm_flavor.vcpu_count 
+        flavor.vm_flavor.storage_gb = flavor_info.vm_flavor.storage_gb 
+        return flavor
+    
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """Return flavor information.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of flavors
+        """
+        response = RwcalYang.VimResources()
+        for flv in self._flavor_list:
+            response.flavorinfo_list.append(RwcalAWSPlugin._fill_flavor_info(flv))
+        return response
+    
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, id):
+        """Return flavor information.
+
+        Arguments:
+            account - a cloud account
+            id - an id for the flavor
+
+        Returns:
+            Flavor info item
+        """
+        flavor = [flav for flav in self._flavor_list if flav.id == id]
+        return (RwcalAWSPlugin._fill_flavor_info(flavor[0]))
+
+    def _fill_network_info(self, network_info, account):
+        """Create a GI object from network info dictionary
+
+        Converts Network information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            network_info - Network information from AWS
+            account - a cloud account
+
+        Returns:
+            Network info item
+        """
+        network                  = RwcalYang.NetworkInfoItem()
+        network.network_id       = network_info.subnet_id
+        network.subnet           = network_info.cidr_block
+        if network_info.tags:
+            for tag in network_info.tags:
+                if tag['Key'] == 'Name':
+                    network.network_name   = tag['Value']
+        return network
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        """Return a list of networks
+
+        Arguments:
+            account - a cloud account
+        
+        Returns:
+            List of networks
+        """
+        response = RwcalYang.VimResources()
+        networks = self._get_driver(account).get_subnet_list()
+        for network in networks:
+            response.networkinfo_list.append(self._fill_network_info(network, account))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, id):
+        """Return a network
+
+        Arguments:
+            account - a cloud account
+            id - an id for the network
+
+        Returns:
+            Network info item
+        """
+        network = self._get_driver(account).get_subnet(id)
+        return self._fill_network_info(network, account)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_network(self, account, network):
+        """Create a new network
+
+        Arguments:
+            account - a cloud account
+            network - Network object
+
+        Returns:
+            Network id
+        """
+        raise NotImplementedError
+    
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        """Delete a network
+
+        Arguments:
+            account - a cloud account
+            network_id - an id for the network
+        """
+        raise NotImplementedError
+    
+    @staticmethod
+    def _fill_port_info(port_info):
+        """Create a GI object from port info dictionary
+
+        Converts Port information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            port_info - Port/Network interface information from AWS
+
+        Returns:
+            Port info item
+        """
+        port = RwcalYang.PortInfoItem()
+
+        port.port_id    = port_info.id
+        port.network_id = port_info.subnet_id
+        if port_info.attachment and 'InstanceId' in port_info.attachment: 
+            port.vm_id = port_info.attachment['InstanceId']
+        port.ip_address = port_info.private_ip_address
+        if port_info.status == 'in-use':
+            port.port_state = 'active'
+        elif port_info.status == 'available':
+            port.port_state = 'inactive'
+        else:
+            port.port_state = 'unknown'
+        if port_info.tag_set:
+            for tag in port_info.tag_set:
+                if tag['Key'] == 'Name':
+                    port.port_name   = tag['Value']
+        return port
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        """Return a port
+
+        Arguments:
+            account - a cloud account
+            port_id - an id for the port
+
+        Returns:
+            Port info item
+        """
+        port = self._get_driver(account).get_network_interface(port_id)
+        return RwcalAWSPlugin._fill_port_info(port)
+    
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        """Return a list of ports
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            Port info list
+        """
+        response = RwcalYang.VimResources()
+        ports = self._get_driver(account).get_network_interface_list()
+        for port in ports:
+            response.portinfo_list.append(RwcalAWSPlugin._fill_port_info(port))
+        return response
+    
+    @rwstatus(ret_on_failure=[""])
+    def do_create_port(self, account, port):
+        """Create a new port
+
+        Arguments:
+            account - a cloud account
+            port - port object
+
+        Returns:
+            Port id
+        """
+        raise NotImplementedError
+    
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        """Delete a port
+
+        Arguments:
+            account - a cloud account
+            port_id - an id for port
+        """
+        raise NotImplementedError
+        
+    @rwstatus(ret_on_failure=[""])
+    def do_add_host(self, account, host):
+        """Add a new host
+
+        Arguments:
+            account - a cloud account
+            host - a host object
+
+        Returns:
+            An id for the host
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        """Remove a host
+
+        Arguments:
+            account - a cloud account
+            host_id - an id for the host
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        """Return a host
+
+        Arguments:
+            account - a cloud account
+            host_id - an id for host
+
+        Returns:
+            Host info item
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        """Return a list of hosts
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of hosts
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the virtual link to create
+
+        Returns:
+            The virtual link id
+        """
+        drv = self._get_driver(account)
+        kwargs = {}
+        kwargs['CidrBlock'] = link_params.subnet 
+
+        subnet =  drv.create_subnet(**kwargs)
+        if link_params.name:
+            subnet.create_tags(Tags=[{'Key': 'Name','Value':link_params.name}])
+        if link_params.associate_public_ip:
+            drv.modify_subnet(SubnetId=subnet.id,MapPublicIpOnLaunch=link_params.associate_public_ip)
+        return subnet.id
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        """Delete a virtual link
+
+        Arguments:
+            account - a cloud account
+            link_id - id for the virtual-link to be deleted
+
+        Returns:
+            None
+        """
+        drv = self._get_driver(account)
+        port_list = drv.get_network_interface_list(SubnetId=link_id)
+        for port in port_list:
+            if port  and port.association and 'AssociationId' in port.association:
+                drv.disassociate_public_ip_from_network_interface(NetworkInterfaceId=port.id)
+            if port and port.attachment and 'AttachmentId' in port.attachment:
+                drv.detach_network_interface(AttachmentId = port.attachment['AttachmentId'],Force=True) #force detach as otherwise delete fails
+                #detaching the interface takes time, so poll until the port is no longer in-use
+                port = drv.get_network_interface(NetworkInterfaceId=port.id)
+                retries = 0
+                while port.status == 'in-use' and retries < 10:
+                    time.sleep(5)
+                    port = drv.get_network_interface(NetworkInterfaceId=port.id)
+            drv.delete_network_interface(NetworkInterfaceId=port.id)
+        drv.delete_subnet(link_id)
+        
+    @staticmethod
+    def _fill_connection_point_info(c_point, port_info):
+        """Create a GI object for RwcalYang.VDUInfoParams_ConnectionPoints()
+
+        Converts EC2.NetworkInterface object returned by AWS driver into 
+        Protobuf Gi Object  
+
+        Arguments:
+            c_point   - connection point object to be filled
+            port_info - Network Interface information from AWS
+        Returns:
+            Protobuf Gi object for RwcalYang.VDUInfoParams_ConnectionPoints
+        """
+        c_point.virtual_link_id = port_info.subnet_id
+        c_point.connection_point_id = port_info.id
+        if port_info.attachment:
+            c_point.vdu_id = port_info.attachment['InstanceId']
+        c_point.ip_address = port_info.private_ip_address
+        if port_info.association and 'PublicIp' in port_info.association:
+            c_point.public_ip = port_info.association['PublicIp']
+        if port_info.tag_set:
+            for tag in port_info.tag_set:
+                if tag['Key'] == 'Name':
+                    c_point.name   = tag['Value']
+        if port_info.status == 'in-use':
+            c_point.state = 'active'
+        elif port_info.status == 'available':
+            c_point.state = 'inactive'
+        else:
+            c_point.state = 'unknown'
+
+    @staticmethod
+    def _fill_virtual_link_info(network_info, port_list):
+        """Create a GI object for VirtualLinkInfoParams
+
+        Converts Subnet and NetworkInterface object
+        returned by AWS driver into Protobuf Gi Object  
+
+        Arguments:
+            network_info - Subnet information from AWS
+            port_list - A list of network interface information from AWS
+        Returns:
+            Protobuf Gi object for VirtualLinkInfoParams
+        """
+        link = RwcalYang.VirtualLinkInfoParams()
+        if network_info.state == 'available':
+            link.state = 'active'
+        else:
+            link.state = 'inactive'
+        link.virtual_link_id = network_info.subnet_id
+        link.subnet = network_info.cidr_block
+        if network_info.tags:
+            for tag in network_info.tags:
+                if tag['Key'] == 'Name':
+                    link.name   = tag['Value']
+        for port in port_list:
+            c_point = link.connection_points.add()
+            RwcalAWSPlugin._fill_connection_point_info(c_point, port)
+
+        return link
+
+    @staticmethod
+    def _fill_vdu_info(vm_info, port_list):
+        """Create a GI object for VDUInfoParams
+
+        Converts VM information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            vm_info - EC2 instance information from AWS
+            port_list - A list of network interface information from AWS
+        Returns:
+            Protobuf Gi object for VDUInfoParams
+        """
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.vdu_id = vm_info.id
+        mgmt_port = [port for port in port_list if port.attachment and port.attachment['DeviceIndex'] == 0]
+        assert(len(mgmt_port) == 1)
+        vdu.management_ip = mgmt_port[0].private_ip_address
+        if mgmt_port[0].association and 'PublicIp' in mgmt_port[0].association:
+            vdu.public_ip = mgmt_port[0].association['PublicIp']
+            #For now set management ip also to public ip
+            #vdu.management_ip = vdu.public_ip
+        if vm_info.tags:
+            for tag in vm_info.tags:
+                if tag['Key'] == 'Name':
+                    vdu.name   = tag['Value']
+                elif tag['Key'] == 'node_id':
+                    vdu.node_id = tag['Value']
+        vdu.image_id = vm_info.image_id
+        vdu.flavor_id = vm_info.instance_type
+        if vm_info.state['Name'] == 'running':
+            vdu.state = 'active'
+        else:
+            vdu.state = 'inactive'
+        #if vm_info.placement and 'AvailabilityZone' in vm_info.placement:
+        #    vdu.availability_zone = vm_info.placement['AvailabilityZone']
+        # Fill the port information
+        cp_port_list = [port for port in port_list if port.attachment and port.attachment['DeviceIndex'] != 0]
+        
+        for port in cp_port_list:
+            c_point = vdu.connection_points.add()
+            RwcalAWSPlugin._fill_connection_point_info(c_point, port)
+        return vdu
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        """Get information about virtual link.
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link 
+
+        Returns:
+            Object of type RwcalYang.VirtualLinkInfoParams
+        """
+        drv = self._get_driver(account)
+        network = drv.get_subnet(SubnetId=link_id)
+        port_list = drv.get_network_interface_list(SubnetId=link_id)
+        virtual_link = RwcalAWSPlugin._fill_virtual_link_info(network, port_list)
+        return virtual_link
+    
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_virtual_link_list(self, account):
+        """Get information about all the virtual links
+
+        Arguments:
+            account  - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VirtualLinkInfoParams
+        """
+        vnf_resources = RwcalYang.VNFResources()
+        drv = self._get_driver(account)
+        networks = drv.get_subnet_list()
+        for network in networks:
+            port_list = drv.get_network_interface_list(SubnetId=network.id)
+            virtual_link = RwcalAWSPlugin._fill_virtual_link_info(network, port_list)
+            vnf_resources.virtual_link_info_list.append(virtual_link)
+        return vnf_resources
+
+    def _create_connection_point(self, account, c_point):
+        """
+        Create a connection point
+        Arguments:
+           account  - a cloud account
+           c_point  - connection_points
+        """
+        drv = self._get_driver(account)
+        port     = drv.create_network_interface(SubnetId=c_point.virtual_link_id)
+        if c_point.name:
+            port.create_tags(Tags=[{'Key': 'Name','Value':c_point.name}])
+        if c_point.associate_public_ip:
+                drv.associate_public_ip_to_network_interface(NetworkInterfaceId = port.id)
+        return port
+    
+    def prepare_vdu_on_boot(self, account, server_id, vdu_init_params, vdu_port_list=None):
+        cmd = PREPARE_VM_CMD.format(key       = account.aws.key,
+                                    secret    = account.aws.secret,
+                                    region    = account.aws.region,
+                                    server_id = server_id)
+        if vdu_init_params.has_field('name'):
+            cmd += " --vdu_name " + vdu_init_params.name
+        if vdu_init_params.has_field('node_id'):
+            cmd += " --vdu_node_id " + vdu_init_params.node_id
+        if vdu_port_list is not None:
+            for port_id in vdu_port_list:
+                cmd += " --vdu_port_list " + port_id
+
+        exec_path = 'python3 ' + os.path.dirname(aws_drv.__file__)
+        exec_cmd = exec_path + '/' + cmd
+        logger.info("Running command: %s", exec_cmd)
+        subprocess.call(exec_cmd, shell=True)
+        
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        """Create a new virtual deployment unit
+
+        Arguments:
+            account     - a cloud account
+            vdu_init  - information about VDU to create (RwcalYang.VDUInitParams)
+
+        Returns:
+            The vdu_id
+        """
+        drv = self._get_driver(account)
+        ### First create required number of ports aka connection points
+        port_list = []
+        network_list = []
+
+        ### Now Create VM
+        kwargs = {}
+        kwargs['ImageId'] = vdu_init.image_id
+        # Get the instance type from the flavor id, which is of the form c3.xlarge-1
+        inst_type = vdu_init.flavor_id.split('-')[0]
+        kwargs['InstanceType'] = inst_type
+        if vdu_init.vdu_init and vdu_init.vdu_init.userdata:
+            kwargs['UserData'] = vdu_init.vdu_init.userdata
+
+        # If we need to allocate a public IP address, create a network interface and
+        # associate an elastic IP with it
+        if vdu_init.allocate_public_address:
+            port_id = drv.create_network_interface(SubnetId=drv.default_subnet_id)
+            drv.associate_public_ip_to_network_interface(NetworkInterfaceId=port_id.id)
+            network_interface = {'NetworkInterfaceId': port_id.id, 'DeviceIndex': 0}
+            kwargs['NetworkInterfaces'] = [network_interface]
+
+        # If no network interface is specified, the AWS driver will use the default
+        # subnet id to create the first network interface, which will also get a
+        # public IP if that is enabled for the subnet
+        vm_inst = drv.create_instance(**kwargs)
+
+        # Wait for instance to get to running state before attaching network interface
+        # to instance 
+        #vm_inst[0].wait_until_running()
+
+        #if vdu_init.name:
+            #vm_inst[0].create_tags(Tags=[{'Key': 'Name','Value':vdu_init.name}])
+        #if vdu_init.node_id is not None:
+            #vm_inst[0].create_tags(Tags=[{'Key':'node_id','Value':vdu_init.node_id}])    
+             
+        # Create the connection points
+        port_list = []
+        for index,c_point in enumerate(vdu_init.connection_points):
+            port_id = self._create_connection_point(account, c_point)
+            port_list.append(port_id.id)
+            #drv.attach_network_interface(NetworkInterfaceId = port_id.id,InstanceId = vm_inst[0].id,DeviceIndex=index+1)
+
+        # prepare_vdu_on_boot waits for the instance to reach the running state,
+        # sets the name and node_id tags, and attaches the network interfaces
+        self.prepare_vdu_on_boot(account, vm_inst[0].id, vdu_init, port_list)
+
+        return vm_inst[0].id
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Modify Properties of existing virtual deployment unit
+
+        Arguments:
+            account     -  a cloud account
+            vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
+        """
+        ### First create required number of ports aka connection points
+        drv = self._get_driver(account)
+        port_list = []
+        network_list = []
+
+        vm_inst = drv.get_instance(vdu_modify.vdu_id)
+
+        if vm_inst.state['Name'] != 'running':
+            logger.error("RWCAL-AWS: VM with id %s is not in running state during modify VDU",vdu_modify.vdu_id)
+            raise InvalidStateError("RWCAL-AWS: VM with id %s is not in running state during modify VDU",vdu_modify.vdu_id)
+
+        port_list = drv.get_network_interface_list(InstanceId=vdu_modify.vdu_id)
+        used_device_indexes = [port.attachment['DeviceIndex'] for port in port_list if port.attachment]
+
+        device_index = 1
+        for c_point in vdu_modify.connection_points_add:
+            # Get an unused device index
+            while device_index in used_device_indexes:
+                device_index = device_index + 1
+            port_id = self._create_connection_point(account, c_point)
+            drv.attach_network_interface(NetworkInterfaceId=port_id.id, InstanceId=vdu_modify.vdu_id, DeviceIndex=device_index)
+
+        ### Detach the requested connection_points
+        for c_point in vdu_modify.connection_points_remove:
+            port = drv.get_network_interface(NetworkInterfaceId=c_point.connection_point_id)
+            # Check if an elastic IP is associated with the interface and release it
+            if port and port.association and 'AssociationId' in port.association:
+                drv.disassociate_public_ip_from_network_interface(NetworkInterfaceId=port.id)
+            if port and port.attachment and port.attachment['DeviceIndex'] != 0:
+                # Force detach, as otherwise the delete fails
+                drv.detach_network_interface(AttachmentId=port.attachment['AttachmentId'], Force=True)
+            else:
+                logger.error("RWCAL-AWS: Cannot modify connection port at index 0")
+
+        # Delete the connection points. Interfaces take time to detach from the
+        # instance, so check the status before deleting the network interface
+        for c_point in vdu_modify.connection_points_remove:
+            port = drv.get_network_interface(NetworkInterfaceId=c_point.connection_point_id)
+            retries = 0
+            if port and port.attachment and port.attachment['DeviceIndex'] == 0:
+                logger.error("RWCAL-AWS: Cannot modify connection port at index 0")
+                continue
+            while port.status == 'in-use' and retries < 10:
+                time.sleep(5)
+                port = drv.get_network_interface(NetworkInterfaceId=c_point.connection_point_id)
+                retries += 1
+            drv.delete_network_interface(port.id)
+              
+    def cleanup_vdu_on_term(self, account, server_id, vdu_port_list=None):
+        cmd = DELETE_VM_CMD.format(key       = account.aws.key,
+                                   secret    = account.aws.secret,
+                                   region    = account.aws.region,
+                                   server_id = server_id)
+        if vdu_port_list is not None:
+            for port_id in vdu_port_list:
+                cmd += " --vdu_port_list " + port_id
+
+        exec_path = 'python3 ' + os.path.dirname(aws_drv.__file__)
+        exec_cmd = exec_path + '/' + cmd
+        logger.info("Running command: %s", exec_cmd)
+        subprocess.call(exec_cmd, shell=True)
+        
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        """Delete a virtual deployment unit
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu to be deleted
+
+        Returns:
+            None
+        """
+        drv = self._get_driver(account)
+        ### Get the list of ports on the VM and delete them
+        vm_inst = drv.get_instance(vdu_id)
+
+        port_list = drv.get_network_interface_list(InstanceId=vdu_id)
+        delete_port_list = [port.id for port in port_list if port.attachment and port.attachment['DeleteOnTermination'] is False]
+        drv.terminate_instance(vdu_id)
+
+        self.cleanup_vdu_on_term(account, vdu_id, delete_port_list)
+        
+    
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        """Get information about a virtual deployment unit.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu
+
+        Returns:
+            Object of type RwcalYang.VDUInfoParams
+        """
+        drv = self._get_driver(account)
+
+        ### Get the list of ports attached to the VM (the management port is
+        ### identified inside _fill_vdu_info)
+        vm = drv.get_instance(vdu_id)
+        port_list = drv.get_network_interface_list(InstanceId=vdu_id)
+        return RwcalAWSPlugin._fill_vdu_info(vm, port_list)
+        
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vdu_list(self, account):
+        """Get information about all the virtual deployment units
+
+        Arguments:
+            account     - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VDUInfoParams
+        """
+        vnf_resources = RwcalYang.VNFResources()
+        drv = self._get_driver(account)
+        vms = drv.list_instances()
+        for vm in vms:
+            ### Get the list of ports attached to the VM (the management port is
+            ### identified inside _fill_vdu_info)
+            port_list = drv.get_network_interface_list(InstanceId=vm.id)
+            vdu = RwcalAWSPlugin._fill_vdu_info(vm, port_list)
+            vnf_resources.vdu_info_list.append(vdu)
+        return vnf_resources
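+
+# Illustrative usage sketch: assuming a populated cloud account for AWS
+# (RwcalYang.CloudAccount is assumed here), that RwcalAWSPlugin() can be
+# constructed without arguments, and that the @rwstatus wrapper returns a
+# (status, result) pair, listing VDUs could look like:
+#
+#     plugin = RwcalAWSPlugin()
+#     status, resources = plugin.do_get_vdu_list(account)
+#     for vdu in resources.vdu_info_list:
+#         print(vdu.name, vdu.management_ip, vdu.state)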
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/CMakeLists.txt b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/CMakeLists.txt
new file mode 100644 (file)
index 0000000..06925d9
--- /dev/null
@@ -0,0 +1,34 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+include(rift_plugin)
+
+set(PKG_NAME rwcal-cloudsim)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+rift_install_python_plugin(rwcal_cloudsim rwcal_cloudsim.py)
+
+install(
+  PROGRAMS
+  etc/lxc-fedora-rift.lxctemplate
+  DESTINATION etc
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/rwcal/cloudsim/__init__.py
+    rift/rwcal/cloudsim/core.py
+    rift/rwcal/cloudsim/exceptions.py
+    rift/rwcal/cloudsim/image.py
+    rift/rwcal/cloudsim/lvm.py
+    rift/rwcal/cloudsim/lxc.py
+    rift/rwcal/cloudsim/net.py
+    rift/rwcal/cloudsim/shell.py
+  PYTHON3_ONLY
+  COMPONENT ${PKG_LONG_NAME})
+
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/Makefile b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/Makefile
new file mode 100644 (file)
index 0000000..345c5f3
--- /dev/null
@@ -0,0 +1,24 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/__init__.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/core.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/core.py
new file mode 100644 (file)
index 0000000..69261fc
--- /dev/null
@@ -0,0 +1,355 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import functools
+
+from . import exceptions
+
+
+def unsupported(f):
+    @functools.wraps(f)
+    def impl(*args, **kwargs):
+        msg = '{} not supported'.format(f.__name__)
+        raise exceptions.RWErrorNotSupported(msg)
+
+    return impl
+
+
+class Cloud(object):
+    """
+    Cloud defines a base class for cloud driver implementations. Note that
+    not all drivers will support the complete set of functionality presented
+    here.
+    """
+
+    @unsupported
+    def get_management_network(self, account):
+        """
+        Returns the management network associated with the specified account.
+
+        @param account - a cloud account
+
+        @return a management network
+        """
+        pass
+
+    @unsupported
+    def create_tenant(self, account, name):
+        """
+        Create a new tenant.
+
+        @param account - a cloud account
+        @param name    - name to assign to the tenant.
+        """
+        pass
+
+    @unsupported
+    def delete_tenant(self, account, tenant_id):
+        """
+        delete a tenant.
+
+        @param account   - a cloud account
+        @param tenant_id - id of tenant to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_tenant_list(self, account):
+        """
+        List tenants.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_role(self, account, name):
+        """
+        Create a new role.
+
+        @param account - a cloud account
+        @param name    - name to assign to the role.
+        """
+        pass
+
+    @unsupported
+    def delete_role(self, account, role_id):
+        """
+        delete a role.
+
+        @param account - a cloud account
+        @param role_id - id of role to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_role_list(self, account):
+        """
+        List roles.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_image(self, account, image):
+        """
+        Create an image
+
+        @param account - a cloud account
+        @param image   - a description of the image to create
+        """
+        pass
+
+    @unsupported
+    def delete_image(self, account, image_id):
+        """
+        delete a vm image.
+
+        @param account  - a cloud account
+        @param image_id - Instance id of VM image to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_image_list(self, account):
+        """
+        Return a list of the names of all available images.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def get_image(self, account, image_id):
+        """
+        Returns image information.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_vm(self, account, vm):
+        """
+        Create a new virtual machine.
+
+        @param account - a cloud account
+        @param vm      - The info required to create a VM
+        """
+        pass
+
+    @unsupported
+    def start_vm(self, account, vm_id):
+        """
+        start an existing virtual machine.
+
+        @param account - a cloud account
+        @param vm_id   - The id of the VM to start
+        """
+        pass
+
+    @unsupported
+    def stop_vm(self, account, vm_id):
+        """
+        Stop a running virtual machine.
+
+        @param account - a cloud account
+        @param vm_id   - The id of the VM to stop
+        """
+        pass
+
+    @unsupported
+    def delete_vm(self, account, vm_id):
+        """
+        delete a virtual machine.
+
+        @param account - a cloud account
+        @param vm_id   - Instance id of VM to be deleted.
+        """
+        pass
+
+    @unsupported
+    def reboot_vm(self, account, vm_id):
+        """
+        reboot a virtual machine.
+
+        @param account - a cloud account
+        @param vm_id   - Instance id of VM to be rebooted.
+        """
+        pass
+
+    @unsupported
+    def get_vm_list(self, account):
+        """
+        Return a list of vms.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def get_vm(self, account):
+        """
+        Return vm information.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_flavor(self, account, flavor):
+        """
+        create new flavor.
+
+        @param account - a cloud account
+        @param flavor  - Flavor object
+        """
+        pass
+
+    @unsupported
+    def delete_flavor(self, account, flavor_id):
+        """
+        Delete flavor.
+
+        @param account   - a cloud account
+        @param flavor_id - Flavor id to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_flavor_list(self, account):
+        """
+        Return a list of flavors.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def get_flavor(self, account):
+        """
+        Return flavor information.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def get_network(self, account, network_id):
+        """
+        Return a network
+
+        @param account    - a cloud account
+        @param network_id - unique network identifier
+        """
+        pass
+
+    @unsupported
+    def get_network_list(self, account):
+        """
+        Return a list of networks
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_network(self, account, network):
+        """
+        Create a new network
+
+        @param account - a cloud account
+        @param network - Network object
+        """
+        pass
+
+    @unsupported
+    def delete_network(self, account, network_id):
+        """
+        Delete a network
+
+        @param account    - a cloud account
+        @param network_id - unique network identifier
+        """
+        pass
+
+    @unsupported
+    def get_port(self, account, port_id):
+        """
+        Return a port
+
+        @param account - a cloud account
+        @param port_id - unique port identifier
+        """
+        pass
+
+    @unsupported
+    def get_port_list(self, account):
+        """
+        Return a list of ports
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_port(self, account, port):
+        """
+        Create a new port
+
+        @param account - a cloud account
+        @param port    - port object
+        """
+        pass
+
+    @unsupported
+    def delete_port(self, account, port_id):
+        """
+        Delete a port
+
+        @param account - a cloud account
+        @param port_id - unique port identifier
+        """
+        pass
+
+    @unsupported
+    def add_host(self, account, host):
+        """
+        Add a new host
+
+        @param account - a cloud account
+        @param host    - a host object
+        """
+        pass
+
+    @unsupported
+    def remove_host(self, account, host_id):
+        """
+        Remove a host
+
+        @param account - a cloud account
+        @param host_id - unique host identifier
+        """
+        pass
+
+    @unsupported
+    def get_host(self, account, host_id):
+        """
+        Return a host
+
+        @param account - a cloud account
+        @param host_id - unique host identifier
+        """
+        pass
+
+    @unsupported
+    def get_host_list(self, account):
+        """
+        Return a list of hosts
+
+        @param account - a cloud account
+        """
+        pass
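+
+# Illustrative sketch: a concrete driver subclasses Cloud and overrides only the
+# operations it supports; anything it does not override keeps the @unsupported
+# behaviour and raises exceptions.RWErrorNotSupported when called.
+#
+#     class NullDriver(Cloud):
+#         def get_vm_list(self, account):
+#             return []
+#
+#     driver = NullDriver()
+#     driver.get_vm_list(account=None)      # returns []
+#     driver.create_tenant(None, "demo")    # raises exceptions.RWErrorNotSupported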
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/exceptions.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/exceptions.py
new file mode 100644 (file)
index 0000000..7b426db
--- /dev/null
@@ -0,0 +1,42 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+#
+# Rift Exceptions:
+#   These exceptions each correspond with a rift status as they are defined
+# in rwtypes.vala.  Adding them here so that errors from C transitioning
+# back to python can be handled in a pythonic manner rather than having to
+# inspect return values.
+
+class RWErrorFailure(Exception):
+    pass
+
+class RWErrorDuplicate(Exception):
+    pass
+
+class RWErrorNotFound(Exception):
+    pass
+
+class RWErrorOutOfBounds(Exception):
+    pass
+
+class RWErrorBackpressure(Exception):
+    pass
+
+class RWErrorTimeout(Exception):
+    pass
+
+class RWErrorExists(Exception):
+    pass
+
+class RWErrorNotEmpty(Exception):
+    pass
+
+class RWErrorNotConnected(Exception):
+    pass
+
+class RWErrorNotSupported(Exception):
+    pass
+
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/image.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/image.py
new file mode 100644 (file)
index 0000000..3b733b1
--- /dev/null
@@ -0,0 +1,28 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import math
+import re
+
+from . import shell
+
+
+class ImageInfoError(Exception):
+    pass
+
+
+def qcow2_virtual_size_mbytes(qcow2_filepath):
+    info_output = shell.command("qemu-img info {}".format(qcow2_filepath))
+    for line in info_output:
+        if line.startswith("virtual size"):
+            match = re.search(r"\(([0-9]*) bytes\)", line)
+            if match is None:
+                raise ImageInfoError("Could not parse image size")
+
+            num_bytes = int(match.group(1))
+            num_mbytes = num_bytes / 1024 / 1024
+            return math.ceil(num_mbytes)
+
+    raise ImageInfoError("Could not image virtual size field in output")
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lvm.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lvm.py
new file mode 100644 (file)
index 0000000..1101685
--- /dev/null
@@ -0,0 +1,268 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import collections
+import logging
+import os
+import re
+
+from . import shell
+
+
+logger = logging.getLogger(__name__)
+
+
+class PhysicalVolume(
+        collections.namedtuple(
+            "PhysicalVolume", [
+                "pv",
+                "vg",
+                "fmt",
+                "attr",
+                "psize",
+                "pfree",
+                ]
+            )
+        ):
+    pass
+
+
+class VolumeGroup(
+        collections.namedtuple(
+            "VolumeGroup", [
+                "vg",
+                "num_pv",
+                "num_lv",
+                "num_sn",
+                "attr",
+                "vsize",
+                "vfree",
+                ]
+            )
+        ):
+    pass
+
+
+class LoopbackVolumeGroup(object):
+    def __init__(self, name):
+        self._name = name
+
+    def __repr__(self):
+        return repr({
+            "name": self.name,
+            "filepath": self.filepath,
+            "loopback": self.loopback,
+            "exists": self.exists,
+            "volume_group": self.volume_group,
+            })
+
+    @property
+    def exists(self):
+        return any(v.vg == self.name for v in volume_groups())
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def filepath(self):
+        return find_backing_file(self.name)
+
+    @property
+    def loopback(self):
+        return find_loop_device(self.name)
+
+    @property
+    def volume_group(self):
+        for vgroup in volume_groups():
+            if vgroup.vg == self.name:
+                return vgroup
+
+    @property
+    def physical_volume(self):
+        for pvolume in physical_volumes():
+            if pvolume.vg == self.name:
+                return pvolume
+
+    @property
+    def size(self):
+        return os.path.getsize(self.filepath)
+
+    def extend_mbytes(self, num_mbytes):
+        """ Extend the size of the Loopback volume group
+
+        Arguments:
+            num_mbytes - Number of megabytes to extend by
+        """
+
+        # Extend the size of the backing store
+        shell.command('truncate -c -s +{}M {}'.format(
+            num_mbytes, self.filepath)
+            )
+
+        # Notify loopback driver of the resized backing store
+        shell.command('losetup -c {}'.format(self.loopback))
+
+        # Expand the physical volume to match new size
+        shell.command('pvresize {}'.format(self.physical_volume.pv))
+
+
+def find_loop_device(volume):
+    pvolumes = physical_volumes()
+    for pvolume in pvolumes:
+        if pvolume.vg == volume:
+            return pvolume.pv
+
+    return None
+
+
+def find_backing_file(volume):
+    """
+    /dev/loop0: [64513]:414503 (/lvm/rift.img)
+
+    """
+    loop = find_loop_device(volume)
+    if loop is None:
+        return None
+
+    output = shell.command("losetup {}".format(loop))[0]
+    return re.search(r'.*\(([^)]*)\).*', output).group(1)
+
+
+def create(volume="rift", filepath="/lvm/rift.img"):
+    """
+    First, we create a loopback device using a file that we put in the file
+    system where running this from. Second, we create an LVM volume group onto
+    the loop device that was just created
+    """
+    pvolumes = physical_volumes()
+    for pvolume in pvolumes:
+        if pvolume.vg == volume:
+            raise ValueError("VolumeGroup %s already exists" % volume)
+
+    # Delete the existing backing file if it exists
+    if os.path.exists(filepath):
+        os.remove(filepath)
+
+    # Create the file that will be used as the backing store
+    if not os.path.exists(os.path.dirname(filepath)):
+        os.makedirs(os.path.dirname(filepath))
+
+    # Create a minimal file to hold any LVM physical volume metadata
+    shell.command('truncate -s 50M {}'.format(filepath))
+
+    # Acquire the next available loopback device
+    loopback = shell.command('losetup -f --show {}'.format(filepath))[0]
+
+    # Create a physical volume
+    shell.command('pvcreate {}'.format(loopback))
+
+    # Create a volume group
+    shell.command('vgcreate {} {}'.format(volume, loopback))
+
+    return LoopbackVolumeGroup(volume)
+
+
+def get(volume="rift"):
+    pvolumes = physical_volumes()
+    for pvolume in pvolumes:
+        if pvolume.vg == volume:
+            return LoopbackVolumeGroup(pvolume.vg)
+
+
+def destroy(volume="rift"):
+    pvolumes = physical_volumes()
+    for pvolume in pvolumes:
+        if pvolume.vg == volume:
+            break
+    else:
+        return
+
+    # Cache the backing file path
+    filepath = find_backing_file(volume)
+
+    # Remove the volume group
+    shell.command('vgremove -f {}'.format(pvolume.vg))
+
+    # Remove the physical volume
+    shell.command('pvremove -y {}'.format(pvolume.pv))
+
+    # Release the loopback device
+    shell.command('losetup -d {}'.format(pvolume.pv))
+
+    # Remove the backing file
+    os.remove(filepath)
+
+
+def physical_volumes():
+    """Returns a list of physical volumes"""
+    cmd = 'pvs --separator "," --rows'
+    lines = [line.strip().split(',') for line in shell.command(cmd)]
+    if not lines:
+        return []
+
+    mapping = {
+            "PV": "pv",
+            "VG": "vg",
+            "Fmt": "fmt",
+            "Attr": "attr",
+            "PSize": "psize",
+            "PFree": "pfree",
+            }
+
+    # Transpose the data so that the first element of the list is a list of
+    # keys.
+    transpose = list(map(list, zip(*lines)))
+
+    # Extract keys
+    keys = transpose[0]
+
+    # Iterate over the remaining data and create the physical volume objects
+    volumes = []
+    for values in transpose[1:]:
+        volume = {}
+        for k, v in zip(keys, values):
+            volume[mapping[k]] = v
+
+        volumes.append(PhysicalVolume(**volume))
+
+    return volumes
+
+
+def volume_groups():
+    """Returns a list of volume groups"""
+    cmd = 'vgs --separator "," --rows'
+    lines = [line.strip().split(',') for line in shell.command(cmd)]
+    if not lines:
+        return []
+
+    mapping = {
+            "VG": "vg",
+            "#PV": "num_pv",
+            "#LV": "num_lv",
+            "#SN": "num_sn",
+            "Attr": "attr",
+            "VSize": "vsize",
+            "VFree": "vfree",
+            }
+
+    # Transpose the data so that the first element of the list is a list of
+    # keys.
+    transpose = list(map(list, zip(*lines)))
+
+    # Extract keys
+    keys = transpose[0]
+
+    # Iterate over the remaining data and create the volume groups
+    groups = []
+    for values in transpose[1:]:
+        group = {}
+        for k, v in zip(keys, values):
+            group[mapping[k]] = v
+
+        groups.append(VolumeGroup(**group))
+
+    return groups
+
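+# Illustrative usage (assumes root privileges and the lvm2/losetup tools; the
+# volume name and backing file path are just examples):
+#
+#     vg = create(volume="rift", filepath="/lvm/rift.img")
+#     vg.extend_mbytes(1024)       # grow the loopback backing store by 1 GB
+#     print(vg.volume_group)       # VolumeGroup namedtuple as reported by vgs
+#     destroy("rift")              # remove the VG, PV, loop device and file
+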
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lxc.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lxc.py
new file mode 100644 (file)
index 0000000..52eb745
--- /dev/null
@@ -0,0 +1,489 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import collections
+import contextlib
+import functools
+import logging
+import os
+import shutil
+import uuid
+
+from . import shell
+from . import image
+from . import lvm
+
+
+logger = logging.getLogger(__name__)
+
+
+class ValidationError(Exception):
+    pass
+
+
+@contextlib.contextmanager
+def mount(mountpoint, path):
+    """Mounts a device and unmounts it upon exit"""
+    shell.command('mount {} {}'.format(mountpoint, path))
+    logger.debug('mount {} {}'.format(mountpoint, path))
+    yield
+    shell.command('umount {}'.format(path))
+    logger.debug('umount {}'.format(path))
+
+
+def create_container(name, template_path, volume, rootfs_qcow2file):
+    """Create a new container
+
+    Arguments:
+        name          - the name of the new container
+        template_path - the template defines the type of container to create
+        volume        - the volume group that the container will be in
+        rootfs_qcow2file - a path to a qcow2 image that contains the rootfs
+
+    Returns:
+        A Container object for the new snapshot
+
+    """
+    cmd = 'lxc-create -t {} -n {} -B lvm --fssize {}M --vgname {}'
+    cmd += " -- --rootfs-qcow2file {}".format(rootfs_qcow2file)
+    cmd += " 2>&1 | tee -a /var/log/rift_lxc.log"
+    virtual_size_mbytes = image.qcow2_virtual_size_mbytes(rootfs_qcow2file)
+
+    loop_volume = lvm.get(volume)
+    loop_volume.extend_mbytes(virtual_size_mbytes)
+
+    shell.command(cmd.format(
+        template_path, name, virtual_size_mbytes, volume
+        ))
+
+    return Container(name, volume=volume, size_mbytes=virtual_size_mbytes)
+
+
+def create_snapshot(base, name, volume, size_mbytes):
+    """Create a clone of an existing container
+
+    Arguments:
+        base     - the name of the existing container
+        name     - the name to give to the clone
+        volume   - the volume group that the container will be in
+        size_mbytes - the size (in MB) of the clone; the volume group is extended by this amount
+
+    Returns:
+        A Container object for the new snapshot
+
+    """
+    cmd = '/bin/bash lxc-clone -o {} -n {} --vgname {} --snapshot'
+
+    loop_volume = lvm.get(volume)
+    loop_volume.extend_mbytes(size_mbytes)
+
+    try:
+        shell.command(cmd.format(base, name, volume))
+
+    except shell.ProcessError as e:
+        # Skip the error that occurs here. It is corrected during configuration
+        # and results from a bug in the lxc script.
+
+        # In lxc-clone, when cloning multiple times from the same container
+        # it is possible that the lvrename operation fails to rename the
+        # file in /dev/rift (but the logical volume is renamed).
+        # This logic below resolves this particular scenario.
+        if "lxc-clone: failed to mount new rootfs" in str(e):
+            os.rmdir("/dev/rift/{name}".format(name=name))
+            shutil.move("/dev/rift/{name}_snapshot".format(name=name),
+                        "/dev/rift/{name}".format(name=name)
+                        )
+
+        elif "mkdir: cannot create directory" not in str(e):
+            raise
+
+    return Container(name, volume=volume, size_mbytes=size_mbytes)
+
+
+def purge_cache():
+    """Removes any cached templates"""
+    shell.command('rm -rf /var/cache/lxc/*')
+
+
+def containers():
+    """Returns a list of containers"""
+    return [c for c in shell.command('lxc-ls') if c]
+
+
+def destroy(name):
+    """Destroys a container
+
+    Arguments:
+        name - the name of the container to destroy
+
+    """
+    shell.command('lxc-destroy -n {}'.format(name))
+
+
+def start(name):
+    """Starts a container
+
+    Arguments:
+        name - the name of the container to start
+
+    """
+    shell.command('lxc-start -d -n {} -l DEBUG'.format(name))
+
+
+def stop(name):
+    """Stops a container
+
+    Arguments:
+        name - the name of the container to stop
+
+    """
+    shell.command('lxc-stop -n {}'.format(name))
+
+
+def state(name):
+    """Returns the current state of a container
+
+    Arguments:
+        name - the name of the container whose state is returned
+
+    Returns:
+        A string describing the state of the container
+
+    """
+    _, state = shell.command('lxc-info -s -n {}'.format(name))[0].split()
+    return state
+
+
+def ls():
+    """Prints the output from 'lxc-ls --fancy'"""
+    print('\n'.join(shell.command('lxc-ls --fancy')))
+
+
+def validate(f):
+    """
+    This decorator is used to check that a given container exists. If the
+    container does not exist, a ValidationError is raised.
+
+    """
+    @functools.wraps(f)
+    def impl(self, *args, **kwargs):
+        if self.name not in containers():
+            msg = 'container ({}) does not exist'.format(self.name)
+            raise ValidationError(msg)
+
+        return f(self, *args, **kwargs)
+
+    return impl
+
+
+class Container(object):
+    """
+    This class provides an interface to an existing container on the system.
+    """
+
+    def __init__(self, name, size_mbytes=4096, volume="rift", hostname=None):
+        self._name = name
+        self._size_mbytes = size_mbytes
+        self._volume = volume
+        self.hostname = name if hostname is None else hostname
+
+    @property
+    def name(self):
+        """The name of the container"""
+        return self._name
+
+    @property
+    def size(self):
+        """The virtual size of the container"""
+        return self._size_mbytes
+
+    @property
+    def volume(self):
+        """The volume that the container is a part of"""
+        return self._volume
+
+    @property
+    def loopback_volume(self):
+        """ Instance of lvm.LoopbackVolumeGroup """
+        return lvm.get(self.volume)
+
+    @property
+    @validate
+    def state(self):
+        """The current state of the container"""
+        return state(self.name)
+
+    @validate
+    def start(self):
+        """Starts the container"""
+        start(self.name)
+
+    @validate
+    def stop(self):
+        """Stops the container"""
+        stop(self.name)
+
+    @validate
+    def destroy(self):
+        """Destroys the container"""
+        destroy(self.name)
+
+    @validate
+    def info(self):
+        """Returns info about the container"""
+        return shell.command('lxc-info -n {}'.format(self.name))
+
+    @validate
+    def snapshot(self, name):
+        """Create a snapshot of this container
+
+        Arguments:
+            name - the name of the snapshot
+
+        Returns:
+            A Container representing the new snapshot
+
+        """
+        return create_snapshot(self.name, name, self.volume, self.size)
+
+    @validate
+    def configure(self, config, volume='rift', userdata=None):
+        """Configures the container
+
+        Arguments:
+            config   - a container configuration object
+            volume   - the volume group that the container will belong to
+            userdata - a string containing userdata that will be passed to
+                       cloud-init for execution
+
+        """
+        # Create the LXC config file
+        with open("/var/lib/lxc/{}/config".format(self.name), "w") as fp:
+            fp.write(str(config))
+            logger.debug('created /var/lib/lxc/{}/config'.format(self.name))
+
+        # Mount the rootfs of the container and configure the hosts and
+        # hostname files of the container.
+        rootfs = '/var/lib/lxc/{}/rootfs'.format(self.name)
+        os.makedirs(rootfs, exist_ok=True)
+
+        with mount('/dev/rift/{}'.format(self.name), rootfs):
+
+            # Create /etc/hostname
+            with open(os.path.join(rootfs, 'etc/hostname'), 'w') as fp:
+                fp.write(self.hostname + '\n')
+                logger.debug('created /etc/hostname')
+
+            # Create /etc/hosts
+            with open(os.path.join(rootfs, 'etc/hosts'), 'w') as fp:
+                fp.write("127.0.0.1 localhost {}\n".format(self.hostname))
+                fp.write("::1 localhost {}\n".format(self.hostname))
+                logger.debug('created /etc/hosts')
+
+            # Disable autofs (conflicts with lxc workspace mount bind)
+            autofs_service_file = os.path.join(
+                    rootfs,
+                    "etc/systemd/system/multi-user.target.wants/autofs.service",
+                    )
+            if os.path.exists(autofs_service_file):
+                os.remove(autofs_service_file)
+
+            # Setup the mount points
+            for mount_point in config.mount_points:
+                mount_point_path = os.path.join(rootfs, mount_point.remote)
+                os.makedirs(mount_point_path, exist_ok=True)
+
+            # Copy the cloud-init script into the nocloud seed directory
+            if userdata is not None:
+                try:
+                    userdata_dst = os.path.join(rootfs, 'var/lib/cloud/seed/nocloud/user-data')
+                    os.makedirs(os.path.dirname(userdata_dst))
+                except FileExistsError:
+                    pass
+
+                try:
+                    with open(userdata_dst, 'w') as fp:
+                        fp.write(userdata)
+                except Exception as e:
+                    logger.exception(e)
+
+                # Cloud init requires a meta-data file in the seed location
+                metadata = "instance_id: {}\n".format(str(uuid.uuid4()))
+                metadata += "local-hostname: {}\n".format(self.hostname)
+
+                try:
+                    metadata_dst = os.path.join(rootfs, 'var/lib/cloud/seed/nocloud/meta-data')
+                    with open(metadata_dst, 'w') as fp:
+                        fp.write(metadata)
+
+                except Exception as e:
+                    logger.exception(e)
+
+
+class ContainerConfig(object):
+    """
+    This class represents the config file that is used to define the interfaces
+    on a container.
+    """
+
+    def __init__(self, name, volume='rift'):
+        self.name = name
+        self.volume = volume
+        self.networks = []
+        self.mount_points = []
+        self.cgroups = ControlGroupsConfig()
+
+    def add_network_config(self, network_config):
+        """Add a network config object
+
+        Arguments:
+            network_config - the network config object to add
+
+        """
+        self.networks.append(network_config)
+
+    def add_mount_point_config(self, mount_point_config):
+        """Add a mount point to the configuration
+
+        Arguments,
+            mount_point_config - a MountPointConfig object
+
+        """
+        self.mount_points.append(mount_point_config)
+
+    def __repr__(self):
+        fields = """
+            lxc.rootfs = /dev/{volume}/{name}
+            lxc.utsname = {utsname}
+            lxc.tty = 4
+            lxc.pts = 1024
+            lxc.mount = /var/lib/lxc/{name}/fstab
+            lxc.cap.drop = sys_module mac_admin mac_override sys_time
+            lxc.kmsg = 0
+            lxc.autodev = 1
+            lxc.kmsg = 0
+            """.format(volume=self.volume, name=self.name, utsname=self.name)
+
+        fields = '\n'.join(n.strip() for n in fields.splitlines())
+        cgroups = '\n'.join(n.strip() for n in str(self.cgroups).splitlines())
+        networks = '\n'.join(str(n) for n in self.networks)
+        mount_points = '\n'.join(str(n) for n in self.mount_points)
+
+        return '\n'.join((fields, cgroups, networks, mount_points))
+
+
+class ControlGroupsConfig(object):
+    """
+    This class represents the control group configuration for a container
+    """
+
+    def __repr__(self):
+        return """
+            #cgroups
+            lxc.cgroup.devices.deny = a
+
+            # /dev/null and zero
+            lxc.cgroup.devices.allow = c 1:3 rwm
+            lxc.cgroup.devices.allow = c 1:5 rwm
+
+            # consoles
+            lxc.cgroup.devices.allow = c 5:1 rwm
+            lxc.cgroup.devices.allow = c 5:0 rwm
+            lxc.cgroup.devices.allow = c 4:0 rwm
+            lxc.cgroup.devices.allow = c 4:1 rwm
+
+            # /dev/{,u}random
+            lxc.cgroup.devices.allow = c 1:9 rwm
+            lxc.cgroup.devices.allow = c 1:8 rwm
+            lxc.cgroup.devices.allow = c 136:* rwm
+            lxc.cgroup.devices.allow = c 5:2 rwm
+
+            # rtc
+            lxc.cgroup.devices.allow = c 254:0 rm
+            """
+
+
+class NetworkConfig(collections.namedtuple(
+    "NetworkConfig", [
+        "type",
+        "link",
+        "flags",
+        "name",
+        "veth_pair",
+        "ipv4",
+        "ipv4_gateway",
+        ]
+    )):
+    """
+    This class represents a network interface configuration for a container.
+    """
+
+    def __new__(cls,
+            type,
+            link,
+            name,
+            flags='up',
+            veth_pair=None,
+            ipv4=None,
+            ipv4_gateway=None,
+            ):
+        return super(NetworkConfig, cls).__new__(
+                cls,
+                type,
+                link,
+                flags,
+                name,
+                veth_pair,
+                ipv4,
+                ipv4_gateway,
+                )
+
+    def __repr__(self):
+        fields = [
+                "lxc.network.type = {}".format(self.type),
+                "lxc.network.link = {}".format(self.link),
+                "lxc.network.flags = {}".format(self.flags),
+                "lxc.network.name = {}".format(self.name),
+                ]
+
+        if self.veth_pair is not None:
+            fields.append("lxc.network.veth.pair = {}".format(self.veth_pair))
+
+        if self.ipv4 is not None:
+            fields.append("lxc.network.ipv4 = {}/24".format(self.ipv4))
+
+        if self.ipv4_gateway is not None:
+            fields.append("lxc.network.ipv4.gateway = {}".format(self.ipv4_gateway))
+
+        header = ["# Start {} configuration".format(self.name)]
+        footer = ["# End {} configuration\n".format(self.name)]
+
+        return '\n'.join(header + fields + footer)
+
+
+class MountConfig(collections.namedtuple(
+    "ContainerMountConfig", [
+        "local",
+        "remote",
+        "read_only",
+        ]
+    )):
+    """
+    This class represents a mount point configuration for a container.
+    """
+
+    def __new__(cls, local, remote, read_only=True):
+        return super(MountConfig, cls).__new__(
+                cls,
+                local,
+                remote,
+                read_only,
+                )
+
+    def __repr__(self):
+        return "lxc.mount.entry = {} {} none {}bind 0 0\n".format(
+                self.local,
+                self.remote,
+                "" if not self.read_only else "ro,"
+                )
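+
+
+# Illustrative configuration sketch (assumes an existing LXC container named
+# "rift-vm" backed by the "rift" volume group and a virbr0 bridge on the host):
+#
+#     config = ContainerConfig("rift-vm")
+#     config.add_network_config(NetworkConfig(type="veth", link="virbr0", name="eth0"))
+#     config.add_mount_point_config(MountConfig("/opt/rift", "opt/rift"))
+#     Container("rift-vm").configure(config)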
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/net.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/net.py
new file mode 100644 (file)
index 0000000..7c7786b
--- /dev/null
@@ -0,0 +1,135 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import collections
+
+import netifaces
+
+from . import shell
+
+
+class VirshError(Exception):
+    pass
+
+
+def create(network, ip_interface=None):
+    """ Create, assign ip and bring up a bridge interface
+
+    Arguments:
+        network - The network name
+        ip_interface - An ipaddress.IPv4Interface instance
+    """
+    bridge_add(network)
+    if ip_interface is not None:
+        bridge_addr(
+                network,
+                str(ip_interface),
+                str(ip_interface.network.broadcast_address),
+                )
+    bridge_up(network)
+
+
+def delete(network):
+    bridge_down(network)
+    bridge_remove(network)
+
+
+def bridge_add(network):
+    shell.command("brctl addbr {network}".format(network=network))
+
+
+def bridge_remove(network):
+    shell.command("brctl delbr {network}".format(network=network))
+
+
+def bridge_addr(network, addr, broadcast):
+    cmd = "ip addr add {addr} broadcast {broadcast} dev {network}"
+    shell.command(cmd.format(addr=addr, broadcast=broadcast, network=network))
+
+
+def bridge_exists(network):
+    return network in netifaces.interfaces()
+
+
+def bridge_down(network):
+    shell.command('ip link set {network} down'.format(network=network))
+
+
+def bridge_up(network):
+    shell.command('ip link set {network} up'.format(network=network))
+
+
+def bridge_addresses(network):
+    try:
+        address = netifaces.ifaddresses(network)[netifaces.AF_INET][0]
+
+    except KeyError:
+        raise ValueError('unable to find subnet for {}'.format(network))
+
+    cls = collections.namedtuple('BridgeAddresses', 'addr netmask broadcast')
+    return cls(**address)
+
+
+VirshNetwork = collections.namedtuple(
+    'VirshNetwork', 'name state autostart persistant')
+
+
+def virsh_list_networks():
+    lines = shell.command('virsh net-list --all')
+    if len(lines) < 2:
+        raise Exception("Expected two lines from virsh net-list output")
+
+    network_lines = lines[2:]
+    virsh_networks = []
+    for line in network_lines:
+        if not line.strip():
+            continue
+
+        (name, state, autostart, persistant) = line.split()
+        virsh_networks.append(
+                VirshNetwork(name, state, autostart, persistant)
+                )
+
+    return virsh_networks
+
+
+def virsh_list_network_names():
+    virsh_networks = virsh_list_networks()
+    return [n.name for n in virsh_networks]
+
+
+def virsh_is_active(network_name):
+    virsh_networks = virsh_list_networks()
+    for network in virsh_networks:
+        if network.name == network_name:
+            return network.state == "active"
+
+    raise VirshError("Did not find virsh network %s" % network_name)
+
+
+def virsh_define_default():
+    shell.command('virsh net-define /usr/share/libvirt/networks/default.xml')
+
+
+def virsh_start(network_name):
+    shell.command('virsh net-start %s' % network_name)
+
+
+def virsh_initialize_default():
+    if "default" not in virsh_list_network_names():
+        virsh_define_default()
+
+    if virsh_is_active("default"):
+        if bridge_exists("virbr0"):
+            bridge_down("virbr0")
+
+        virsh_destroy("default")
+
+    virsh_start("default")
+
+
+def virsh_destroy(network_name):
+    shell.command('virsh net-destroy %s' % network_name)
+
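+# Illustrative usage (assumes root privileges plus the brctl, ip and virsh
+# tools; the bridge name and subnet are just examples):
+#
+#     import ipaddress
+#     create("br-data", ipaddress.IPv4Interface("10.66.0.1/24"))
+#     print(bridge_addresses("br-data"))
+#     delete("br-data")
+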
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/shell.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/shell.py
new file mode 100644 (file)
index 0000000..86a9463
--- /dev/null
@@ -0,0 +1,34 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import logging
+import subprocess
+
+
+logger = logging.getLogger(__name__)
+
+
+class ProcessError(Exception):
+    pass
+
+
+def command(cmd):
+    logger.debug('executing: {}'.format(cmd))
+
+    process = subprocess.Popen(
+            cmd,
+            shell=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            )
+
+    stdout, stderr = process.communicate()
+    process.wait()
+
+    if process.returncode != 0:
+        raise ProcessError(stderr.decode())
+
+    return stdout.decode().splitlines()
+
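+# Illustrative usage:
+#
+#     try:
+#         for line in command("uname -a"):
+#             logger.debug(line)
+#     except ProcessError as e:
+#         logger.error("command failed: %s", e)
+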
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rwcal_cloudsim.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/rwcal_cloudsim.py
new file mode 100644 (file)
index 0000000..0b7797b
--- /dev/null
@@ -0,0 +1,1385 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import collections
+import itertools
+import logging
+import os
+import uuid
+
+import ipaddress
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang,
+    )
+
+import rw_status
+import rwlogger
+
+import rift.rwcal.cloudsim.lxc as lxc
+import rift.rwcal.cloudsim.lvm as lvm
+import rift.rwcal.cloudsim.net as net
+import rift.rwcal.cloudsim.exceptions as exceptions
+
+logger = logging.getLogger('rwcal.cloudsim')
+
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+class ImageLocationError(Exception):
+    pass
+
+
+class CreateNetworkError(Exception):
+    pass
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class Resources(object):
+    def __init__(self):
+        self.images = dict()
+
+
+def rwcal_copy_object(obj):
+    dup = obj.__class__()
+    dup.copy_from(obj)
+    return dup
+
+
+MGMT_NETWORK_NAME = "virbr0"
+MGMT_NETWORK_INTERFACE_IP = ipaddress.IPv4Interface("192.168.122.1/24")
+
+
+class IPPoolError(Exception):
+    pass
+
+
+class NetworkIPPool(object):
+    def __init__(self, subnet):
+        self._network = ipaddress.IPv4Network(subnet)
+        self._ip_gen = self._network.hosts()
+        self._allocated_ips = []
+        self._unallocated_ips = []
+
+    def allocate_ip(self):
+        try:
+            ip = str(next(self._ip_gen))
+        except StopIteration:
+            try:
+                ip = self._unallocated_ips.pop()
+            except IndexError:
+                raise IPPoolError("All ip addresses exhausted")
+
+        self._allocated_ips.append(ip)
+        return ip
+
+    def deallocate_ip(self, ip):
+        if ip not in self._allocated_ips:
+            raise ValueError("Did not find IP %s in allocate ip pool")
+
+        self._allocated_ips.remove(ip)
+        self._unallocated_ips.append(ip)
+
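+# Illustrative usage: addresses are handed out from the subnet's host range
+# first, then from previously released addresses.
+#
+#     pool = NetworkIPPool("192.168.122.0/24")
+#     ip = pool.allocate_ip()     # "192.168.122.1"
+#     pool.deallocate_ip(ip)      # returned to the pool for reuse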
+
+class CalManager(object):
+    def __init__(self):
+        self._vms = {}
+        self._ports = {}
+        self._images = {}
+        self._networks = {}
+        self.flavors = {}
+
+        self._port_to_vm = {}
+        self._vm_to_image = {}
+        self._port_to_network = {}
+        self._network_to_ip_pool = {}
+
+        self._vm_to_ports = collections.defaultdict(list)
+        self._image_to_vms = collections.defaultdict(list)
+        self._network_to_ports = collections.defaultdict(list)
+
+        self._vm_id_gen = itertools.count(1)
+        self._network_id_gen = itertools.count(1)
+        self._image_id_gen = itertools.count(1)
+
+    def add_image(self, image):
+        image_id = str(next(self._image_id_gen))
+        self._images[image_id] = image
+
+        return image_id
+
+    def remove_image(self, image_id):
+        for vm_id in self.get_image_vms(image_id):
+            self.remove_vm(vm_id)
+
+        del self._images[image_id]
+        del self._image_to_vms[image_id]
+
+    def get_image(self, image_id):
+        if image_id not in self._images:
+            msg = "Unable to find image {}"
+            raise exceptions.RWErrorNotFound(msg.format(image_id))
+
+        return self._images[image_id]
+
+    def get_image_list(self):
+        return list(self._images.values())
+
+    def get_image_vms(self, image_id):
+        if image_id not in self._images:
+            msg = "Unable to find image {}"
+            raise exceptions.RWErrorNotFound(msg.format(image_id))
+
+        return self._image_to_vms[image_id]
+
+    def add_port(self, network_id, vm_id, port):
+        if network_id not in self._networks:
+            msg = "Unable to find network {}"
+            raise exceptions.RWErrorNotFound(msg.format(network_id))
+
+        if vm_id not in self._vms:
+            msg = "Unable to find vm {}"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        port_id = str(uuid.uuid4())
+        self._ports[port_id] = port
+
+        self._vm_to_ports[vm_id].append(port_id)
+        self._network_to_ports[network_id].append(port_id)
+
+        self._port_to_vm[port_id] = vm_id
+        self._port_to_network[port_id] = network_id
+
+        return port_id
+
+    def remove_port(self, port_id):
+        if port_id not in self._ports:
+            msg = "Unable to find port {}"
+            raise exceptions.RWErrorNotFound(msg.format(port_id))
+
+        network_id = self._port_to_network[port_id]
+        vm_id = self._port_to_vm[port_id]
+
+        self._vm_to_ports[vm_id].remove(port_id)
+        self._network_to_ports[network_id].remove(port_id)
+
+        del self._ports[port_id]
+        del self._port_to_vm[port_id]
+        del self._port_to_network[port_id]
+
+    def get_port(self, port_id):
+        return self._ports[port_id]
+
+    def get_port_list(self):
+        return list(self._ports.values())
+
+    def add_network(self, network):
+        network_id = str(next(self._network_id_gen))
+        self._networks[network_id] = network
+
+        return network_id
+
+    def remove_network(self, network_id):
+        for port_id in self.get_network_ports(network_id):
+            self.remove_port(port_id)
+
+        del self._networks[network_id]
+
+    def get_network(self, network_id):
+        return self._networks[network_id]
+
+    def add_network_ip_pool(self, network_id, ip_pool):
+        self._network_to_ip_pool[network_id] = ip_pool
+
+    def get_network_ip_pool(self, network_id):
+        return self._network_to_ip_pool[network_id]
+
+    def remove_network_ip_pool(self, network_id):
+        del self._network_to_ip_pool[network_id]
+
+    def get_network_list(self):
+        return list(self._networks.values())
+
+    def get_network_ports(self, network_id):
+        return self._network_to_ports[network_id]
+
+    def add_vm(self, image_id, vm):
+        if image_id not in self._images:
+            msg = "Unable to find image {}"
+            raise exceptions.RWErrorNotFound(msg.format(image_id))
+
+        vm_id = str(next(self._vm_id_gen))
+        self._vms[vm_id] = vm
+
+        self._vm_to_image[vm_id] = image_id
+        self._image_to_vms[image_id].append(vm_id)
+
+        return vm_id
+
+    def remove_vm(self, vm_id):
+        for port_id in self.get_vm_ports(vm_id):
+            self.remove_port(port_id)
+
+        image_id = self._vm_to_image[vm_id]
+
+        self._image_to_vms[image_id].remove(vm_id)
+
+        del self._vms[vm_id]
+        del self._vm_to_image[vm_id]
+
+    def get_vm(self, vm_id):
+        return self._vms[vm_id]
+
+    def get_vm_list(self):
+        return list(self._vms.values())
+
+    def get_vm_ports(self, vm_id):
+        return self._vm_to_ports[vm_id]
+
+
+class LxcManager(object):
+    def __init__(self):
+        self._containers = {}
+        self._ports = {}
+        self._bridges = {}
+
+        self._port_to_container = {}
+        self._port_to_bridge = {}
+
+        self._container_to_ports = collections.defaultdict(list)
+        self._bridge_to_ports = collections.defaultdict(list)
+
+        # Create the management network
+        self.mgmt_network = RwcalYang.NetworkInfoItem()
+        self.mgmt_network.network_name = MGMT_NETWORK_NAME
+
+        network = MGMT_NETWORK_INTERFACE_IP.network
+        self.mgmt_network.subnet = str(network)
+
+        # Create/Start the default virtd network for NAT-based
+        # connectivity inside containers (http://wiki.libvirt.org/page/Networking)
+        if "default" not in net.virsh_list_network_names():
+            logger.debug("default virtd network not found.  Creating.")
+            net.virsh_define_default()
+
+            # The default virsh profile creates a virbr0 interface
+            # with a 192.168.122.1 IP address.  It also sets up iptables
+            # for NAT access.
+            net.virsh_start("default")
+
+        # Create the IP pool
+        mgmt_network_hosts = network.hosts()
+
+        # Remove the management interface ip from the pool
+        self._mgmt_ip_pool = list(mgmt_network_hosts)
+        self._mgmt_ip_pool.remove(MGMT_NETWORK_INTERFACE_IP.ip)
+
+    def acquire_mgmt_ip(self):
+        """Returns an IP address from the available pool"""
+        # TODO these ips will need to be recycled at some point
+        return str(self._mgmt_ip_pool.pop())
+
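+    def release_mgmt_ip(self, ip_address):
+        """Illustrative sketch only; not part of the original driver.
+
+        One possible way to address the recycling TODO above: hand a
+        management IP back to the pool so it can be reused.  The method
+        name and the append-based recycling are assumptions.
+        """
+        self._mgmt_ip_pool.append(ipaddress.IPv4Address(ip_address))
+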
+    def add_port(self, bridge_id, container_id, port):
+        if bridge_id not in self._bridges:
+            msg = "Unable to find bridge {}"
+            raise exceptions.RWErrorNotFound(msg.format(bridge_id))
+
+        if container_id not in self._containers:
+            msg = "Unable to find container {}"
+            raise exceptions.RWErrorNotFound(msg.format(container_id))
+
+        port_id = str(uuid.uuid4())
+        self._ports[port_id] = port
+
+        self._container_to_ports[container_id].append(port_id)
+        self._bridge_to_ports[bridge_id].append(port_id)
+
+        self._port_to_container[port_id] = container_id
+        self._port_to_bridge[port_id] = bridge_id
+
+        return port_id
+
+    def remove_port(self, port_id):
+        if port_id not in self._ports:
+            msg = "Unable to find port {}"
+            raise exceptions.RWErrorNotFound(msg.format(port_id))
+
+        bridge_id = self._port_to_bridge[port_id]
+        container_id = self._port_to_container[port_id]
+
+        self._container_to_ports[container_id].remove(port_id)
+        self._bridge_to_ports[bridge_id].remove(port_id)
+
+        del self._ports[port_id]
+        del self._port_to_bridge[port_id]
+        del self._port_to_container[port_id]
+
+    def get_port(self, port_id):
+        return self._ports[port_id]
+
+    def add_bridge(self, bridge):
+        bridge_id = str(uuid.uuid4())
+        self._bridges[bridge_id] = bridge
+
+        return bridge_id
+
+    def remove_bridge(self, bridge_id):
+        for port_id in self._bridge_to_ports[bridge_id]:
+            self.remove_port(port_id)
+
+        del self._bridges[bridge_id]
+
+    def get_bridge(self, bridge_id):
+        return self._bridges[bridge_id]
+
+    def get_bridge_ports(self, bridge_id):
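+        # Note: unlike get_container_ports(), which returns port ids, this
+        # returns the port objects themselves.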
+        port_ids = self._bridge_to_ports[bridge_id]
+        return [self.get_port(port_id) for port_id in port_ids]
+
+    def add_container(self, container):
+        container_id = str(uuid.uuid4())
+        self._containers[container_id] = container
+
+        return container_id
+
+    def remove_container(self, container_id):
+        for port_id in self.get_container_ports(container_id):
+            self.remove_port(port_id)
+
+        del self._containers[container_id]
+
+    def get_container(self, container_id):
+        return self._containers[container_id]
+
+    def get_container_ports(self, container_id):
+        return self._container_to_ports[container_id]
+
+
+
+class Datastore(object):
+    """
+    This class stores data that is shared among different instances of the
+    CloudSimPlugin class.
+    """
+    def __init__(self):
+        self.lxc_manager = LxcManager()
+        self.cal_manager = CalManager()
+        self.cal_to_lxc = {'image': {}, 'port': {}, 'network': {}, 'vm': {}}
+        self.last_index = 0
+
+
+class CloudSimPlugin(GObject.Object, RwCal.Cloud):
+    # HACK this is a work-around for sharing/persisting container information.
+    # This will only work for instances of CloudSimPlugin that are within the
+    # same process. Thus, it works in collapsed mode, but will not work in
+    # expanded mode. At the point where it is necessary to persist this
+    # information in expanded mode, we will need to find a better solution.
+    datastore = None
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        if CloudSimPlugin.datastore is None:
+            CloudSimPlugin.datastore = Datastore()
+
+    @property
+    def lxc(self):
+        return CloudSimPlugin.datastore.lxc_manager
+
+    @property
+    def cal(self):
+        return CloudSimPlugin.datastore.cal_manager
+
+    @property
+    def volume_group(self):
+        return lvm.get("rift")
+
+    @property
+    def cal_to_lxc(self):
+        return CloudSimPlugin.datastore.cal_to_lxc
+
+    def next_snapshot_name(self):
+        """Generates a new snapshot name for a container"""
+        CloudSimPlugin.datastore.last_index += 1
+        return 'rws{}'.format(CloudSimPlugin.datastore.last_index)
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="cloudsim",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If creds are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus(
+                status="success",
+                details=""
+                )
+
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        """Returns the management network
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            a NetworkInfo object
+
+        """
+        return self.lxc.mgmt_network
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        """
+        Create a new tenant.
+
+        @param name     - name to assign to the tenant.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """
+        delete a tenant.
+
+        @param tenant_id     - id of tenant to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """
+        List tenants.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        """
+        Create a new role.
+
+        @param name         - name to assign to the role.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """
+        delete a role.
+
+        @param role_id     - id of role to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """
+        List roles.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        """Create a new image
+
+        Creates a new container based upon the template and tarfile specified.
+        Only one image is currently supported for a given instance of the CAL.
+
+        Arguments:
+            account - a cloud account
+            image   - an ImageInfo object
+
+        Raises:
+            An RWErrorDuplicate is raised if create_image is called and there
+            is already an image.
+
+        Returns:
+            The UUID of the new image
+
+        """
+        current_images = self.cal.get_image_list()
+        lxc_name = "rwm{}".format(len(current_images))
+
+        if not image.has_field("disk_format"):
+            logger.warning("Image disk format not provided assuming qcow2")
+            image.disk_format = "qcow2"
+
+        if image.disk_format not in ["qcow2"]:
+            msg = "Only qcow2 currently supported for container CAL"
+            raise exceptions.RWErrorNotSupported(msg)
+
+        # Create the base container
+        if "REUSE_LXC" in os.environ and lxc_name == "rwm0":
+            logger.info("REUSE_LXC set.  Not creating rwm0")
+            container = lxc.Container(lxc_name)
+        else:
+            container = lxc.create_container(
+                    name=lxc_name,
+                    template_path=os.path.join(
+                            os.environ['RIFT_INSTALL'],
+                            "etc/lxc-fedora-rift.lxctemplate",
+                            ),
+                    volume="rift",
+                    rootfs_qcow2file=image.location,
+                    )
+
+        # Add the images to the managers
+        cal_image_id = self.cal.add_image(image)
+        lxc_image_id = self.lxc.add_container(container)
+
+        # Create the CAL to LXC mapping
+        self.cal_to_lxc["image"][cal_image_id] = lxc_image_id
+
+        image.id = cal_image_id
+
+        return image.id
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """Deletes an image
+
+        This function will remove the record of the image from the CAL and
+        destroy the associated container.
+
+        Arguments:
+            account  - a cloud account
+            image_id - the UUID of the image to delete
+
+        Raises:
+            An RWErrorNotEmpty exception is raised if there are VMs based on
+            this image (the VMs need to be deleted first). An RWErrorNotFound
+            is raised if the image_id does not match any of the known images.
+
+        """
+        container_id = self.cal_to_lxc["image"][image_id]
+        container = self.lxc.get_container(container_id)
+
+        # Stop the image and destroy it (NB: it should not be necessary to stop
+        # the container, but just in case)
+        container.stop()
+        container.destroy()
+
+        self.cal.remove_image(image_id)
+        self.lxc.remove_container(container_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        """Returns the specified image
+
+        Arguments:
+            account  - a cloud account
+            image_id - the UUID of the image to retrieve
+
+        Raises:
+            An RWErrorNotFound exception is raised if the image_id does not
+            match any of the known images.
+
+        Returns:
+            An image object
+
+        """
+        return self.cal.get_image(image_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """Returns a list of images"""
+        resources = RwcalYang.VimResources()
+        for image in self.cal.get_image_list():
+            resources.imageinfo_list.append(rwcal_copy_object(image))
+
+        return resources
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        """Create a VM
+
+        Arguments:
+            vm - the VM info used to define the desired VM
+
+        Raises:
+            An RWErrorFailure is raised if the VM cannot be created.
+
+        Returns:
+            a string containing the unique id of the created VM
+
+        """
+        # Retrieve the container that will be used as the base of the snapshot
+        container_id = self.cal_to_lxc["image"][vm.image_id]
+        container = self.lxc.get_container(container_id)
+
+        # Create a container snapshot
+        snapshot = container.snapshot(self.next_snapshot_name())
+        snapshot.hostname = vm.vm_name
+
+        # Register the vm and container
+        snapshot_id = self.lxc.add_container(snapshot)
+        vm.vm_id = self.cal.add_vm(vm.image_id, vm)
+
+        self.cal_to_lxc["vm"][vm.vm_id] = snapshot_id
+
+        return vm.vm_id
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """Starts the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to start
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        if vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        container_id = self.cal_to_lxc["vm"][vm_id]
+
+        snapshot = self.lxc.get_container(container_id)
+        port_ids = self.lxc.get_container_ports(container_id)
+
+        config = lxc.ContainerConfig(snapshot.name)
+
+        for port_id in port_ids:
+            port = self.lxc.get_port(port_id)
+            config.add_network_config(port)
+
+        vm = self.cal.get_vm(vm_id)
+
+        # Set the management IP on the vm if not yet set
+        if not vm.has_field("management_ip"):
+            mgmt_ip = self.lxc.acquire_mgmt_ip()
+            vm.management_ip = mgmt_ip
+
+        # Add the management interface
+        config.add_network_config(
+                lxc.NetworkConfig(
+                    type="veth",
+                    link=self.lxc.mgmt_network.network_name,
+                    name="eth0",
+                    ipv4=vm.management_ip,
+                    ipv4_gateway='auto',
+                    )
+                )
+
+        # Add rift root as a mount point
+        config.add_mount_point_config(
+            lxc.MountConfig(
+                local=os.environ["RIFT_ROOT"],
+                remote=os.environ["RIFT_ROOT"][1:],
+                read_only=False,
+                )
+            )
+
+        userdata = None
+        if vm.cloud_init.has_field("userdata"):
+            userdata = vm.cloud_init.userdata
+
+        snapshot.configure(config, userdata=userdata)
+        snapshot.start()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """Stops the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to stop
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        if vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        # Stop the container
+        container_id = self.cal_to_lxc["vm"][vm_id]
+        snapshot = self.lxc.get_container(container_id)
+        snapshot.stop()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """Deletes the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        if vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        container_id = self.cal_to_lxc["vm"][vm_id]
+
+        snapshot = self.lxc.get_container(container_id)
+        snapshot.stop()
+        snapshot.destroy()
+
+        self.cal.remove_vm(vm_id)
+        self.lxc.remove_container(container_id)
+
+        # TODO: Recycle management ip
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """
+        reboot a virtual machine.
+
+        @param vm_id     - Instance id of VM to be rebooted.
+        """
+        self.do_stop_vm(account, vm_id, no_rwstatus=True)
+        self.do_start_vm(account, vm_id, no_rwstatus=True)
+
+    @rwstatus
+    def do_get_vm(self, account, vm_id):
+        """Returns the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to return
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        Returns:
+            a VMInfoItem object
+
+        """
+        if vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        return self.cal.get_vm(vm_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        """Returns the a list of the VMs known to the driver
+
+        Returns:
+            a list of VMInfoItem objects
+
+        """
+        resources = RwcalYang.VimResources()
+        for vm in self.cal.get_vm_list():
+            resources.vminfo_list.append(rwcal_copy_object(vm))
+
+        return resources
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        """
+        create new flavor.
+
+        @param flavor   - Flavor object
+        """
+        flavor_id = str(uuid.uuid4())
+        self.cal.flavors[flavor_id] = flavor
+        logger.debug('Created flavor: {}'.format(flavor_id))
+        return flavor_id
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """
+        Delete flavor.
+
+        @param flavor_id     - Flavor id to be deleted.
+        """
+        logger.debug('Deleted flavor: {}'.format(flavor_id))
+        self.cal.flavors.pop(flavor_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        """
+        Return the specified flavor
+
+        @param flavor_id - the id of the flavor to return
+        """
+        flavor = self.cal.flavors[flavor_id]
+        logger.debug('Returning flavor-info for : {}'.format(flavor_id))
+        return flavor
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """
+        Return a list of flavors
+        """
+        vim_resources = RwcalYang.VimResources()
+        for flavor in self.cal.flavors.values():
+            f = RwcalYang.FlavorInfoItem()
+            f.copy_from(flavor)
+            vim_resources.flavorinfo_list.append(f)
+        logger.debug("Returning list of flavor-info of size: %d", len(vim_resources.flavorinfo_list))
+        return vim_resources
+
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        """Create a port between a network and a virtual machine
+
+        Arguments:
+            account - a cloud account
+            port    - a description of port to create
+
+        Raises:
+            Raises an RWErrorNotFound exception if either the network or the VM
+            associated with the port cannot be found.
+
+        Returns:
+            the ID of the newly created port.
+
+        """
+        if port.network_id not in self.cal_to_lxc["network"]:
+            msg = 'Unable to find the specified network ({})'
+            raise exceptions.RWErrorNotFound(msg.format(port.network_id))
+
+        if port.vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(port.vm_id))
+
+        if port.has_field("ip_address"):
+            raise exceptions.RWErrorFailure("IP address of the port must not be specific")
+
+        network = self.cal.get_network(port.network_id)
+        ip_pool = self.cal.get_network_ip_pool(port.network_id)
+        port.ip_address = ip_pool.allocate_ip()
+
+        net_config = lxc.NetworkConfig(
+                type='veth',
+                link=network.network_name[:15],
+                name="veth" + str(uuid.uuid4())[:10],
+                ipv4=port.ip_address,
+                )
+
+        lxc_network_id = self.cal_to_lxc["network"][port.network_id]
+        lxc_vm_id = self.cal_to_lxc["vm"][port.vm_id]
+
+        cal_port_id = self.cal.add_port(port.network_id, port.vm_id, port)
+        lxc_port_id = self.lxc.add_port(lxc_network_id, lxc_vm_id, net_config)
+
+        self.cal_to_lxc["port"][cal_port_id] = lxc_port_id
+        port.port_id = cal_port_id
+
+        return port.port_id
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        """Delete the specified port
+
+        Arguments:
+            account - a cloud account
+            port_id - the ID of the port to delete
+
+        Raises:
+            A RWErrorNotFound exception is raised if the specified port cannot
+            be found.
+
+        """
+        if port_id not in self.cal_to_lxc["port"]:
+            msg = "Unable to find the specified port ({})"
+            raise exceptions.RWErrorNotFound(msg.format(port_id))
+
+        lxc_port_id = self.cal_to_lxc["port"][port_id]
+
+        # Release the port's ip address back into the network pool
+        port = self.cal.get_port(port_id)
+        ip_pool = self.cal.get_network_ip_pool(port.network_id)
+        ip_pool.deallocate_ip(port.ip_address)
+
+        self.cal.remove_port(port_id)
+        self.lxc.remove_port(lxc_port_id)
+
+        del self.cal_to_lxc["port"][port_id]
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        """Return the specified port
+
+        Arguments:
+            account - a cloud account
+            port_id - the ID of the port to return
+
+        Raises:
+            A RWErrorNotFound exception is raised if the specified port cannot
+            be found.
+
+        Returns:
+            The specified port.
+
+        """
+        if port_id not in self.cal_to_lxc["port"]:
+            msg = "Unable to find the specified port ({})"
+            raise exceptions.RWErrorNotFound(msg.format(port_id))
+
+        return self.cal.get_port(port_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        """Returns a list of ports"""
+        resources = RwcalYang.VimResources()
+        for port in self.cal.get_port_list():
+            resources.portinfo_list.append(rwcal_copy_object(port))
+
+        return resources
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        """Create a network
+
+        Arguments:
+            account - a cloud account
+            network - a description of the network to create
+
+        Returns:
+            The ID of the newly created network
+
+        """
+
+        # Create the network
+        try:
+            # Setup a pool of mgmt IPv4 addresses
+            if net.bridge_exists(network.network_name):
+                logger.warning("Bridge %s already exists.  Removing.", network.network_name)
+                net.bridge_down(network.network_name)
+                net.bridge_remove(network.network_name)
+
+            # Ensure that the subnet field was filled out and is valid
+            if not network.has_field("subnet"):
+                raise CreateNetworkError("subnet not provided in create network request")
+
+            try:
+                ipaddress.IPv4Network(network.subnet)
+            except ValueError as e:
+                raise CreateNetworkError("Could not convert subnet into a "
+                                         "IPv4Network: %s" % str(network.subnet))
+
+            ip_pool = NetworkIPPool(network.subnet)
+
+            # Create the management bridge with interface information
+            net.create(network.network_name)
+
+        except Exception as e:
+            logger.warning(str(e))
+            raise
+
+        # Register the network
+        cal_network_id = self.cal.add_network(network)
+        lxc_network_id = self.lxc.add_bridge(network)
+        self.cal.add_network_ip_pool(cal_network_id, ip_pool)
+
+        self.cal_to_lxc["network"][cal_network_id] = lxc_network_id
+
+        # Set the ID of the network object
+        network.network_id = cal_network_id
+
+        return network.network_id
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        """
+        Arguments:
+            account    - a cloud account
+            network_id - the UUID of the network to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified network cannot be
+            found.
+
+        """
+        if network_id not in self.cal_to_lxc["network"]:
+            msg = "Unable to find the specified network ({})"
+            raise exceptions.RWErrorNotFound(msg.format(network_id))
+
+        # Get the associated bridge ID
+        bridge_id = self.cal_to_lxc["network"][network_id]
+
+        # Delete the network
+        network = self.cal.get_network(network_id)
+        net.delete(network.network_name)
+
+        # Remove the network records
+        self.lxc.remove_bridge(bridge_id)
+        self.cal.remove_network(network_id)
+        del self.cal_to_lxc["network"][network_id]
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        """Returns the specified network
+
+        Arguments:
+            account    - a cloud account
+            network_id - the UUID of the network to retrieve
+
+        Raises:
+            An RWErrorNotFound is raised if the specified network cannot be
+            found.
+
+        Returns:
+            The specified network
+
+        """
+        return self.cal.get_network(network_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        """Returns a list of network objects"""
+        resources = RwcalYang.VimResources()
+        for network in self.cal.get_network_list():
+            resources.networkinfo_list.append(rwcal_copy_object(network))
+
+        return resources
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the virtual link to create
+
+        Returns:
+            The id of the newly created virtual link
+        """
+        network = RwcalYang.NetworkInfoItem()
+        network.network_name = link_params.name
+        network.subnet = link_params.subnet
+
+        if link_params.has_field("provider_network"):
+            logger.warning("Container CAL does not implement provider network")
+
+        rs, net_id = self.do_create_network(account, network)
+        if rs != RwTypes.RwStatus.SUCCESS:
+            raise exceptions.RWErrorFailure(rs)
+
+        return net_id
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        """Delete a virtual link
+
+        Arguments:
+            account - a cloud account
+            link_id - id for the virtual-link to be deleted
+
+        Returns:
+            None
+        """
+
+        network_ports = self.cal.get_network_ports(link_id)
+        for port_id in network_ports:
+            self.do_delete_port(account, port_id, no_rwstatus=True)
+
+        self.do_delete_network(account, link_id, no_rwstatus=True)
+
+    @staticmethod
+    def fill_connection_point_info(c_point, port_info):
+        """Create a GI object for RwcalYang.VDUInfoParams_ConnectionPoints()
+
+        Converts Port information dictionary object returned by container cal
+        driver into Protobuf Gi Object
+
+        Arguments:
+            c_point   - the connection point GI object to populate
+            port_info - Port information from container cal
+        Returns:
+            Protobuf Gi object for RwcalYang.VDUInfoParams_ConnectionPoints
+        """
+        c_point.name = port_info.port_name
+        c_point.connection_point_id = port_info.port_id
+        c_point.ip_address = port_info.ip_address
+        c_point.state = 'active'
+        c_point.virtual_link_id = port_info.network_id
+        c_point.vdu_id = port_info.vm_id
+
+    @staticmethod
+    def create_virtual_link_info(network_info, port_list):
+        """Create a GI object for VirtualLinkInfoParams
+
+        Converts Network and Port information dictionary object
+        returned by container manager into Protobuf Gi Object
+
+        Arguments:
+            network_info - Network information from container cal
+            port_list - A list of port information from container cal
+        Returns:
+            Protobuf Gi object for VirtualLinkInfoParams
+        """
+        link = RwcalYang.VirtualLinkInfoParams()
+        link.name = network_info.network_name
+        link.state = 'active'
+        link.virtual_link_id = network_info.network_id
+        for port in port_list:
+            c_point = link.connection_points.add()
+            CloudSimPlugin.fill_connection_point_info(c_point, port)
+
+        link.subnet = network_info.subnet
+
+        return link
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        """Get information about virtual link.
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+
+        Returns:
+            Object of type RwcalYang.VirtualLinkInfoParams
+        """
+
+        network = self.do_get_network(account, link_id, no_rwstatus=True)
+        port_ids = self.cal.get_network_ports(network.network_id)
+        ports = [self.cal.get_port(p_id) for p_id in port_ids]
+
+        virtual_link = CloudSimPlugin.create_virtual_link_info(
+                network, ports
+                )
+
+        return virtual_link
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link_list(self, account):
+        """Get information about all the virtual links
+
+        Arguments:
+            account  - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VirtualLinkInfoParams
+        """
+        networks = self.do_get_network_list(account, no_rwstatus=True)
+        vnf_resources = RwcalYang.VNFResources()
+        for network in networks.networkinfo_list:
+            virtual_link = self.do_get_virtual_link(account, network.network_id, no_rwstatus=True)
+            vnf_resources.virtual_link_info_list.append(virtual_link)
+
+        return vnf_resources
+
+    def _create_connection_point(self, account, c_point, vdu_id):
+        """
+        Create a connection point
+        Arguments:
+           account  - a cloud account
+           c_point  - connection_points
+        """
+        port = RwcalYang.PortInfoItem()
+        port.port_name = c_point.name
+        port.network_id = c_point.virtual_link_id
+        port.port_type = 'normal' ### Find Port type from network_profile under cloud account
+        port.vm_id = vdu_id
+        port_id = self.do_create_port(account, port, no_rwstatus=True)
+        return port_id
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        """Create a new virtual deployment unit
+
+        Arguments:
+            account     - a cloud account
+            vdu_init  - information about VDU to create (RwcalYang.VDUInitParams)
+
+        Returns:
+            The vdu_id
+        """
+        ### Create VM
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = vdu_init.name
+        vm.image_id = vdu_init.image_id
+        if vdu_init.vdu_init.has_field('userdata'):
+            vm.cloud_init.userdata = vdu_init.vdu_init.userdata
+        vm.user_tags.node_id = vdu_init.node_id
+
+        vm_id = self.do_create_vm(account, vm, no_rwstatus=True)
+
+        ### Now create required number of ports aka connection points
+        port_list = []
+        for c_point in vdu_init.connection_points:
+            virtual_link_id = c_point.virtual_link_id
+
+            # Attempt to fetch the network to verify that the network
+            # already exists.
+            self.do_get_network(account, virtual_link_id, no_rwstatus=True)
+
+            port_id = self._create_connection_point(account, c_point, vm_id)
+            port_list.append(port_id)
+
+        # Finally start the vm
+        self.do_start_vm(account, vm_id, no_rwstatus=True)
+
+        return vm_id
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Modify Properties of existing virtual deployment unit
+
+        Arguments:
+            account     -  a cloud account
+            vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
+        """
+        ### First create required number of ports aka connection points
+        port_list = []
+        network_list = []
+        if not vdu_modify.has_field("vdu_id"):
+            raise ValueError("vdu_id must not be empty")
+
+        for c_point in vdu_modify.connection_points_add:
+            if not c_point.has_field("virtual_link_id"):
+                raise ValueError("virtual link id not provided")
+
+            network_list.append(c_point.virtual_link_id)
+            port_id = self._create_connection_point(account, c_point, vdu_modify.vdu_id)
+            port_list.append(port_id)
+
+        ### Delete the requested connection_points
+        for c_point in vdu_modify.connection_points_remove:
+            self.do_delete_port(account, c_point.connection_point_id, no_rwstatus=True)
+
+        self.do_reboot_vm(account, vdu_modify.vdu_id)
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        """Delete a virtual deployment unit
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu to be deleted
+
+        Returns:
+            None
+        """
+        ### Get list of port on VM and delete them.
+        port_id_list = self.cal.get_vm_ports(vdu_id)
+        ports = [self.cal.get_port(p_id) for p_id in port_id_list]
+        for port in ports:
+            self.do_delete_port(account, port.port_id, no_rwstatus=True)
+        self.do_delete_vm(account, vdu_id, no_rwstatus=True)
+
+    @staticmethod
+    def fill_vdu_info(vm_info, port_list):
+        """create a gi object for vduinfoparams
+
+        converts vm information dictionary object returned by openstack
+        driver into protobuf gi object
+
+        arguments:
+            vm_info - vm information from openstack
+            mgmt_network - management network
+            port_list - a list of port information from container cal
+        returns:
+            protobuf gi object for vduinfoparams
+        """
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.name = vm_info.vm_name
+        vdu.vdu_id = vm_info.vm_id
+        vdu.management_ip = vm_info.management_ip
+        vdu.public_ip = vm_info.management_ip
+        vdu.node_id = vm_info.user_tags.node_id
+        vdu.image_id = vm_info.image_id
+        vdu.state = 'active'
+
+        # fill the port information
+        for port in port_list:
+            c_point = vdu.connection_points.add()
+            CloudSimPlugin.fill_connection_point_info(c_point, port)
+
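+        # vm_flavor is not derived from a stored flavor here; fixed values
+        # are reported for the container-based VDU.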
+        vdu.vm_flavor.vcpu_count = 1
+        vdu.vm_flavor.memory_mb = 8 * 1024 # 8GB
+        vdu.vm_flavor.storage_gb = 10
+
+        return vdu
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        """Get information about a virtual deployment unit.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu
+
+        Returns:
+            Object of type RwcalYang.VDUInfoParams
+        """
+        port_id_list = self.cal.get_vm_ports(vdu_id)
+        ports = [self.cal.get_port(p_id) for p_id in port_id_list]
+        vm_info = self.do_get_vm(account, vdu_id, no_rwstatus=True)
+        vdu_info = CloudSimPlugin.fill_vdu_info(vm_info, ports)
+
+        return vdu_info
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu_list(self, account):
+        """Get information about all the virtual deployment units
+
+        Arguments:
+            account     - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VDUInfoParams
+        """
+
+        vnf_resources = RwcalYang.VNFResources()
+
+        vm_resources = self.do_get_vm_list(account, no_rwstatus=True)
+        for vm in vm_resources.vminfo_list:
+            port_id_list = self.cal.get_vm_ports(vm.vm_id)
+            ports = [self.cal.get_port(p_id) for p_id in port_id_list]
+            vdu = CloudSimPlugin.fill_vdu_info(vm, ports)
+            vnf_resources.vdu_info_list.append(vdu)
+
+        return vnf_resources
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/test/cloudsim_module_test.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsim/test/cloudsim_module_test.py
new file mode 100755 (executable)
index 0000000..090dd27
--- /dev/null
@@ -0,0 +1,210 @@
+#!/usr/bin/env python3
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+
+import logging
+import os
+import ipaddress
+import unittest
+import uuid
+import sys
+from gi.repository import RwcalYang
+
+import rift.rwcal.cloudsim.lvm as lvm
+import rift.rwcal.cloudsim.lxc as lxc
+
+sys.path.append('../')
+import rwcal_cloudsim
+
+
+logger = logging.getLogger('rwcal-cloudsim')
+
+
+class CloudsimTest(unittest.TestCase):
+    @classmethod
+    def cleanUp(cls):
+        for container in lxc.containers():
+            lxc.stop(container)
+
+        for container in lxc.containers():
+            lxc.destroy(container)
+
+        #lvm.destroy("rift")
+
+    @classmethod
+    def create_image(cls):
+        image = RwcalYang.ImageInfoItem()
+        image.name = "rift-lxc-image"
+        image.location = "/net/sharedfiles/home1/common/vm/R0.4/rift-mano-devel-latest.qcow2"
+        image.disk_format = "qcow2"
+        image.id = cls.cal.do_create_image(cls.account, image, no_rwstatus=True)
+
+        cls.image = image
+
+    @classmethod
+    def setUpClass(cls):
+        cls.cleanUp()
+
+        lvm.create("rift")
+        cls.account = RwcalYang.CloudAccount()
+        cls.cal = rwcal_cloudsim.CloudSimPlugin()
+        cls.create_image()
+
+    def setUp(self):
+        pass
+
+    def create_vm(self, image, index):
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = 'rift-s{}'.format(index + 1)
+        vm.image_id = image.id
+        vm.user_tags.node_id = str(uuid.uuid4())
+
+        self.cal.do_create_vm(self.account, vm, no_rwstatus=True)
+
+        return vm
+
+    def create_virtual_link(self, index):
+        link = RwcalYang.VirtualLinkReqParams()
+        link.name = 'link-{}'.format(index + 1)
+        link.subnet = '192.168.{}.0/24'.format(index + 1)
+
+        logger.debug("Creating virtual link: %s", link)
+
+        link_id = self.cal.do_create_virtual_link(self.account, link, no_rwstatus=True)
+        return link, link_id
+
+    def create_vdu(self, image, index, virtual_link_ids=None):
+        vdu_init = RwcalYang.VDUInitParams()
+        vdu_init.name = 'rift-vdu{}'.format(index + 1)
+        vdu_init.node_id = str(uuid.uuid4())
+        vdu_init.image_id = image.id
+
+        if virtual_link_ids is not None:
+            for vl_id in virtual_link_ids:
+                cp = vdu_init.connection_points.add()
+                cp.name = "{}_{}".format(vdu_init.name, vl_id)
+                cp.virtual_link_id = vl_id
+
+        vdu_id = self.cal.do_create_vdu(self.account, vdu_init, no_rwstatus=True)
+
+        return vdu_init, vdu_id
+
+    def test_create_vm(self):
+        self.create_vm(self.image, 0)
+
+    def test_create_delete_virtual_link(self):
+        link, link_id = self.create_virtual_link(0)
+        get_link = self.cal.do_get_virtual_link(self.account, link_id, no_rwstatus=True)
+        assert get_link.name == link.name
+        assert get_link.virtual_link_id == link_id
+        assert len(get_link.connection_points) == 0
+        assert get_link.state == "active"
+
+        resources = self.cal.do_get_virtual_link_list(self.account, no_rwstatus=True)
+        assert len(resources.virtual_link_info_list) == 1
+        assert resources.virtual_link_info_list[0] == get_link
+
+        self.cal.do_delete_virtual_link(self.account, link_id, no_rwstatus=True)
+        resources = self.cal.do_get_virtual_link_list(self.account, no_rwstatus=True)
+        assert len(resources.virtual_link_info_list) == 0
+
+    def test_create_delete_vdu(self):
+        vdu, vdu_id = self.create_vdu(self.image, 0)
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+
+        assert get_vdu.image_id == self.image.id
+        assert get_vdu.name == vdu.name
+        assert get_vdu.node_id == vdu.node_id
+
+        assert len(get_vdu.connection_points) == 0
+
+        assert get_vdu.vm_flavor.vcpu_count >= 1
+        assert get_vdu.vm_flavor.memory_mb >= 8 * 1024
+        assert get_vdu.vm_flavor.storage_gb >= 5
+
+        resources = self.cal.do_get_vdu_list(self.account, no_rwstatus=True)
+        assert len(resources.vdu_info_list) == 1
+        assert resources.vdu_info_list[0] == get_vdu
+
+        resources = self.cal.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
+
+        resources = self.cal.do_get_vdu_list(self.account, no_rwstatus=True)
+        assert len(resources.vdu_info_list) == 0
+
+    def test_create_vdu_single_connection_point(self):
+        link, link_id = self.create_virtual_link(0)
+        vdu, vdu_id = self.create_vdu(self.image, 0, [link_id])
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 1
+        cp = get_vdu.connection_points[0]
+        assert (ipaddress.IPv4Address(cp.ip_address) in
+                ipaddress.IPv4Network(link.subnet))
+
+        get_link = self.cal.do_get_virtual_link(self.account, link_id, no_rwstatus=True)
+        assert len(get_link.connection_points) == 1
+        assert get_link.connection_points[0].vdu_id == vdu_id
+        assert get_link.connection_points[0].virtual_link_id == link_id
+
+        self.cal.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
+        get_link = self.cal.do_get_virtual_link(self.account, link_id, no_rwstatus=True)
+        assert len(get_link.connection_points) == 0
+
+        self.cal.do_delete_virtual_link(self.account, link_id)
+
+    def test_create_vdu_multiple_connection_point(self):
+        link1, link1_id = self.create_virtual_link(0)
+        link2, link2_id = self.create_virtual_link(1)
+        link3, link3_id = self.create_virtual_link(2)
+        link_id_map = {link1_id: link1, link2_id: link2, link3_id: link3}
+
+        vdu, vdu_id = self.create_vdu(self.image, 0, link_id_map.keys())
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 3
+        for cp in get_vdu.connection_points:
+            assert cp.virtual_link_id in link_id_map
+            link = link_id_map[cp.virtual_link_id]
+
+            assert (ipaddress.IPv4Address(cp.ip_address) in
+                    ipaddress.IPv4Network(link.subnet))
+
+        self.cal.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
+
+        self.cal.do_delete_virtual_link(self.account, link1_id, no_rwstatus=True)
+        self.cal.do_delete_virtual_link(self.account, link2_id, no_rwstatus=True)
+        self.cal.do_delete_virtual_link(self.account, link3_id, no_rwstatus=True)
+
+    def test_modify_vdu_add_remove_connection_point(self):
+        vdu, vdu_id = self.create_vdu(self.image, 0)
+        link, link_id = self.create_virtual_link(0)
+
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 0
+
+        modify_vdu = RwcalYang.VDUModifyParams()
+        modify_vdu.vdu_id = vdu_id
+        cp = modify_vdu.connection_points_add.add()
+        cp.virtual_link_id = link_id
+        cp.name = "link_1"
+        self.cal.do_modify_vdu(self.account, modify_vdu, no_rwstatus=True)
+
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 1
+
+        modify_vdu = RwcalYang.VDUModifyParams()
+        modify_vdu.vdu_id = vdu_id
+        cp = modify_vdu.connection_points_remove.add()
+        cp.connection_point_id = get_vdu.connection_points[0].connection_point_id
+        self.cal.do_modify_vdu(self.account, modify_vdu, no_rwstatus=True)
+
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 0
+
+        self.cal.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
+        self.cal.do_delete_virtual_link(self.account, link_id, no_rwstatus=True)
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsimproxy/CMakeLists.txt b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsimproxy/CMakeLists.txt
new file mode 100644 (file)
index 0000000..36da9dc
--- /dev/null
@@ -0,0 +1,15 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+include(rift_plugin)
+
+set(PKG_NAME rwcal-cloudsimproxy)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+
+rift_install_python_plugin(rwcal_cloudsimproxy rwcal_cloudsimproxy.py)
+
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsimproxy/Makefile b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsimproxy/Makefile
new file mode 100644 (file)
index 0000000..345c5f3
--- /dev/null
@@ -0,0 +1,24 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsimproxy/rwcal_cloudsimproxy.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_cloudsimproxy/rwcal_cloudsimproxy.py
new file mode 100644 (file)
index 0000000..dcda6ed
--- /dev/null
@@ -0,0 +1,647 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import logging
+
+import requests
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang,
+    )
+
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.cloudsimproxy')
+
+
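+# Decorator factory: wraps the do_* methods below so that exceptions raised
+# inside them are converted into RwTypes.RwStatus return codes, using the
+# mapping given here for "not found" errors.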
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class CloudsimProxyError(Exception):
+    pass
+
+
+class CloudSimProxyPlugin(GObject.Object, RwCal.Cloud):
+    DEFAULT_PROXY_HOST = "localhost"
+    DEFAULT_PROXY_PORT = 9002
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._session = None
+        self._host = None
+        self._port = CloudSimProxyPlugin.DEFAULT_PROXY_PORT
+
+    @property
+    def session(self):
+        if self._session is None:
+            self._session = requests.Session()
+
+        return self._session
+
+    @property
+    def host(self):
+        return self._host
+
+    @host.setter
+    def host(self, host):
+        if self._host is not None:
+            if host != self._host:
+                raise CloudsimProxyError("Cloudsim host changed during execution")
+
+        self._host = host
+
+    def _set_host_from_account(self, account):
+        self.host = account.cloudsim_proxy.host
+
+    def _proxy_rpc_call(self, api, **kwargs):
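+        """Invoke the named API on the cloudsim proxy server.
+
+        The keyword arguments are POSTed as JSON to
+        http://<host>:<port>/api/<api>.  Each entry in the response's
+        "return_vals" list is deserialized back into the RwcalYang GI type
+        named by its "proto_type" field before being returned to the caller.
+        """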
+        url = "http://{host}:{port}/api/{api}".format(
+                host=self._host,
+                port=self._port,
+                api=api,
+                )
+
+        post_dict = {}
+        for key, val in kwargs.items():
+            post_dict[key] = val
+
+        logger.debug("Sending post to url %s with json data: %s", url, post_dict)
+        r = self.session.post(url, json=post_dict)
+        r.raise_for_status()
+
+        response_dict = r.json()
+        logger.debug("Got json response: %s", response_dict)
+
+        return_vals = []
+        for return_val in response_dict["return_vals"]:
+            value = return_val["value"]
+            proto_type = return_val["proto_type"]
+            if proto_type is not None:
+                gi_cls = getattr(RwcalYang, proto_type)
+                logger.debug("Deserializing into %s", proto_type)
+                gi_obj = gi_cls.from_dict(value)
+                value = gi_obj
+
+            return_vals.append(value)
+
+        logger.debug("Returning RPC return values: %s", return_vals)
+
+        if len(return_vals) == 0:
+            return None
+
+        elif len(return_vals) == 1:
+            return return_vals[0]
+
+        else:
+            return tuple(return_vals[1:])
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        logger.addHandler(
+            rwlogger.RwLogger(
+                category="cloudsimproxy",
+                log_hdl=rwlog_ctx,
+            )
+        )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        """Returns the management network
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            a NetworkInfo object
+
+        """
+
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_management_network")
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        """
+        Create a new tenant.
+
+        @param name     - name to assign to the tenant.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """
+        delete a tenant.
+
+        @param tenant_id     - id of tenant to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """
+        List tenants.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        """
+        Create a new role.
+
+        @param name         - name to assign to the role.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """
+        delete a role.
+
+        @param role_id     - id of role to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """
+        List roles.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        """Create a new image
+
+        Creates a new container based upon the template and tarfile specified.
+        Only one image is currently supported for a given instance of the CAL.
+
+        Arguments:
+            account - a cloud account
+            image   - an ImageInfo object
+
+        Raises:
+            An RWErrorDuplicate is raised if create_image is called and there
+            is already an image.
+
+        Returns:
+            The UUID of the new image
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_image", image=image.as_dict())
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """Deletes an image
+
+        This function will remove the record of the image from the CAL and
+        destroy the associated container.
+
+        Arguments:
+            account  - a cloud account
+            image_id - the UUID of the image to delete
+
+        Raises:
+            An RWErrorNotEmpty exception is raised if there are VMs based on
+            this image (the VMs need to be deleted first). An RWErrorNotFound
+            is raised if the image_id does not match any of the known images.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_management_network")
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        """Returns the specified image
+
+        Arguments:
+            account  - a cloud account
+            image_id - the UUID of the image to retrieve
+
+        Raises:
+            An RWErrorNotFound exception is raised if the image_id does not
+            match any of the known images.
+
+        Returns:
+            An image object
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_image", image_id=image_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """Returns a list of images"""
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_image_list")
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        """Create a VM
+
+        Arguments:
+            vm - the VM info used to define the desired VM
+
+        Raises:
+            An RWErrorFailure is raised if the VM cannot be created.
+
+        Returns:
+            a string containing the unique id of the created VM
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_vm", vm=vm.as_dict())
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """Starts the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to start
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("start_vm", vm_id=vm_id)
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """Stops the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to stop
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("stop_vm", vm_id=vm_id)
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """Deletes the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_vm", vm_id=vm_id)
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """
+        reboot a virtual machine.
+
+        @param vm_id     - Instance id of VM to be rebooted.
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("reboot_vm", vm_id=vm_id)
+
+    @rwstatus
+    def do_get_vm(self, account, vm_id):
+        """Returns the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to return
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        Returns:
+            a VMInfoItem object
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_vm", vm_id=vm_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        """Returns the a list of the VMs known to the driver
+
+        Returns:
+            a list of VMInfoItem objects
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_vm_list")
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        """
+        create new flavor.
+
+        @param flavor   - Flavor object
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_flavor", flavor=flavor.as_dict())
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """
+        Delete flavor.
+
+        @param flavor_id     - Flavor id to be deleted.
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_flavor", flavor_id=flavor_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        """
+        Return the specified flavor
+
+        @param flavor_id - the id of the flavor to return
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_flavor", flavor_id=flavor_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """
+        Return a list of flavors
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_flavor_list")
+
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        """Create a port between a network and a virtual machine
+
+        Arguments:
+            account - a cloud account
+            port    - a description of port to create
+
+        Raises:
+            Raises an RWErrorNotFound exception if either the network or the VM
+            associated with the port cannot be found.
+
+        Returns:
+            the ID of the newly created port.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_port", port=port.as_dict())
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        """Delete the specified port
+
+        Arguments:
+            account - a cloud account
+            port_id - the ID of the port to delete
+
+        Raises:
+            A RWErrorNotFound exception is raised if the specified port cannot
+            be found.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_port", port_id=port_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        """Return the specified port
+
+        Arguments:
+            account - a cloud account
+            port_id - the ID of the port to return
+
+        Raises:
+            A RWErrorNotFound exception is raised if the specified port cannot
+            be found.
+
+        Returns:
+            The specified port.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_port", port_id=port_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        """Returns a list of ports"""
+
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_port_list")
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        """Create a network
+
+        Arguments:
+            account - a cloud account
+            network - a description of the network to create
+
+        Returns:
+            The ID of the newly created network
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_network", network=network.as_dict())
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        """
+        Arguments:
+            account    - a cloud account
+            network_id - the UUID of the network to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified network cannot be
+            found.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_network", network_id=network_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        """Returns the specified network
+
+        Arguments:
+            account    - a cloud account
+            network_id - the UUID of the network to return
+
+        Raises:
+            An RWErrorNotFound is raised if the specified network cannot be
+            found.
+
+        Returns:
+            The specified network
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_network", network_id=network_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        """Returns a list of network objects"""
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_network_list")
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the virtual link to create
+
+        Returns:
+            The id of the newly created virtual link
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_virtual_link", link_params=link_params.as_dict())
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        """Get information about virtual link.
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+
+        Returns:
+            Object of type RwcalYang.VirtualLinkInfoParams
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_virtual_link", link_id=link_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_virtual_link_list(self, account):
+        """Returns a list of the virtual links
+
+        Returns:
+            a list of RwcalYang.VirtualLinkInfoParams objects
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_virtual_link_list")
+
+    @rwstatus(ret_on_failure=[None])
+    def do_delete_virtual_link(self, account, link_id):
+        """Delete the virtual link
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_virtual_link", link_id=link_id)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        """Create a new virtual deployment unit
+
+        Arguments:
+            account     - a cloud account
+            vdu_init  - information about VDU to create (RwcalYang.VDUInitParams)
+
+        Returns:
+            The vdu_id
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_vdu", vdu_params=vdu_init.as_dict())
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Modify Properties of existing virtual deployment unit
+
+        Arguments:
+            account     -  a cloud account
+            vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("modify_vdu", vdu_params=vdu_modify.as_dict())
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        """Delete a virtual deployment unit
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu to be deleted
+
+        Returns:
+            None
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_vdu", vdu_id=vdu_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        """Get information about a virtual deployment unit.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu
+
+        Returns:
+            Object of type RwcalYang.VDUInfoParams
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_vdu", vdu_id=vdu_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu_list(self, account):
+        """Get information about all the virtual deployment units
+
+        Arguments:
+            account     - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VDUInfoParams
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_vdu_list")
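+
+# Note on the calling convention used throughout this plugin: every do_* method
+# above is wrapped by the @rwstatus decorator, which converts raised exceptions
+# into an RwTypes.RwStatus code and makes the method return a (status, value)
+# tuple; on failure, 'value' falls back to the decorator's ret_on_failure
+# argument. A caller therefore unpacks the result before using it. Illustrative
+# sketch only -- 'cal' and 'account' are placeholder names for a loaded instance
+# of this plugin and an RwcalYang cloud account whose host field identifies the
+# proxied CAL server:
+#
+#   rc, image_list = cal.do_get_image_list(account)
+#   rc, vdu_id = cal.do_create_vdu(account, vdu_init)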
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_mock/CMakeLists.txt b/modules/core/rwvx/rwcal/plugins/vala/rwcal_mock/CMakeLists.txt
new file mode 100644 (file)
index 0000000..35b9141
--- /dev/null
@@ -0,0 +1,15 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+include(rift_plugin)
+
+### rwcal-mock package
+set(PKG_NAME rwcal-mock)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+
+rift_install_python_plugin(rwcal_mock rwcal_mock.py)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_mock/Makefile b/modules/core/rwvx/rwcal/plugins/vala/rwcal_mock/Makefile
new file mode 100644 (file)
index 0000000..345c5f3
--- /dev/null
@@ -0,0 +1,24 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_mock/rwcal_mock.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_mock/rwcal_mock.py
new file mode 100644 (file)
index 0000000..d5f478c
--- /dev/null
@@ -0,0 +1,590 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import random
+import socket
+import struct
+import collections
+import logging
+import os
+import uuid
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.mock')
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+class ImageLocationError(Exception):
+    pass
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class Resources(object):
+    def __init__(self):
+        self.images = dict()
+        self.vlinks = dict()
+        self.vdus  = dict()
+        self.flavors = dict()
+
+class MockPlugin(GObject.Object, RwCal.Cloud):
+    """This class implements the abstract methods in the Cloud class.
+    Mock is used for unit testing."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self.resources = collections.defaultdict(Resources)
+
+    @staticmethod
+    def get_uuid(name):
+        if name is None:
+            raise ValueError("Name cannot be None")
+        return str(uuid.uuid3(uuid.NAMESPACE_DNS, name))
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rwcal.mock",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+        account = RwcalYang.CloudAccount()
+        account.name = 'mock_account'
+        account.account_type = 'mock'
+        account.mock.username = 'mock_user'
+        self.create_default_resources(account)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If creds are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus(
+                status="success",
+                details=""
+                )
+
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        """
+        Returns the management network
+
+        @param account - a cloud account
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        """
+        Create a new tenant.
+
+        @param name     - name to assign to the tenant.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """
+        delete a tenant.
+
+        @param tenant_id     - id of tenant to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """
+        List tenants.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        """
+        Create a new role.
+
+        @param name         - name to assign to the role.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """
+        delete a role.
+
+        @param role_id     - id of role to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """
+        List roles.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        """
+        Create a VM image
+
+        @param account - cloud account information
+        @param image   - information about the image
+        """
+        if image.location is None:
+            raise ImageLocationError("uninitialized image location")
+
+        if not os.path.exists(image.location):
+            raise MissingFileError("{} does not exist".format(image.location))
+
+        image.id = self.get_uuid(image.name)
+        self.resources[account.name].images[image.id] = image
+        logger.debug('created image: {}'.format(image.id))
+        return image.id
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """
+        delete a vm image.
+
+        @param image_id     - Instance id of VM image to be deleted.
+        """
+        if account.name not in self.resources:
+            raise UnknownAccountError()
+
+        del self.resources[account.name].images[image_id]
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        return self.resources[account.name].images[image_id]
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """
+        Return a list of all available images.
+        """
+        boxed_image_list = RwcalYang.VimResources()
+        for image in self.resources[account.name].images.values():
+            image_entry = RwcalYang.ImageInfoItem()
+            image_entry.id = image.id
+            image_entry.name = image.name
+            if image.has_field('checksum'):
+                image_entry.checksum = image.checksum
+            boxed_image_list.imageinfo_list.append(image_entry)
+
+        return boxed_image_list
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        """
+        Create a new virtual machine.
+
+        @param account - a cloud account
+        @param vm      - the VM info used to define the desired VM
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """
+        Start a virtual machine.
+
+        @param vm_id - id of VM to start
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """
+        Stop a virtual machine.
+
+        @param vm_id - id of VM to stop
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """
+        delete a virtual machine.
+
+        @param vm_id     - Instance id of VM to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """
+        reboot a virtual machine.
+
+        @param vm_id     - Instance id of VM to be rebooted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        """
+        create new flavor.
+
+        @param flavor   - Flavor object
+        """
+        flavor_id = self.get_uuid(flavor.name)
+        self.resources[account.name].flavors[flavor_id] = flavor
+        logger.debug('Created flavor: {}'.format(flavor_id))
+        return flavor_id
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """
+        Delete flavor.
+
+        @param flavor_id     - Flavor id to be deleted.
+        """
+        logger.debug('Deleted flavor: {}'.format(flavor_id))
+        self.resources[account.name].flavors.pop(flavor_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        """
+        Return the specified flavor
+
+        @param flavor_id - the id of the flavor to return
+        """
+        flavor = self.resources[account.name].flavors[flavor_id]
+        logger.debug('Returning flavor-info for : {}'.format(flavor_id))
+        return flavor
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """
+        Return a list of flavors
+        """
+        vim_resources = RwcalYang.VimResources()
+        for flavor in self.resources[account.name].flavors.values():
+            f = RwcalYang.FlavorInfoItem()
+            f.copy_from(flavor)
+            vim_resources.flavorinfo_list.append(f)
+        logger.debug("Returning list of flavor-info of size: %d", len(vim_resources.flavorinfo_list))
+        return vim_resources
+
+
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        raise NotImplementedError()
+
+    def create_default_resources(self, account):
+        """
+        Create default resources
+        """
+        link_list = []
+        ### Add virtual links
+        for i in range(2):
+            vlink = RwcalYang.VirtualLinkReqParams()
+            vlink.name = 'link-'+str(i)
+            vlink.subnet = '10.0.0.0/24'
+            rs, vlink_id = self.do_create_virtual_link(account, vlink)
+            assert vlink_id != ''
+            logger.debug("Creating static virtual-link with name: %s", vlink.name)
+            link_list.append(vlink_id)
+
+        ### Add VDUs
+        for i in range(2):
+            vdu = RwcalYang.VDUInitParams()
+            vdu.name = 'vdu-'+str(i)
+            vdu.node_id = str(i)
+            vdu.image_id = self.get_uuid('image-'+str(i))
+            vdu.flavor_id = self.get_uuid('flavor'+str(i))
+            vdu.vm_flavor.vcpu_count = 4
+            vdu.vm_flavor.memory_mb = 4096*2
+            vdu.vm_flavor.storage_gb = 40
+            for j in range(2):
+                c = vdu.connection_points.add()
+                c.name = vdu.name+'-port-'+str(j)
+                c.virtual_link_id = link_list[j]
+            rs, vdu_id = self.do_create_vdu(account, vdu)
+            assert vdu_id != ''
+            logger.debug("Creating static VDU with name: %s", vdu.name)
+
+        for i in range(2):
+            flavor = RwcalYang.FlavorInfoItem()
+            flavor.name = 'flavor-'+str(i)
+            flavor.vm_flavor.vcpu_count = 4
+            flavor.vm_flavor.memory_mb = 4096*2
+            flavor.vm_flavor.storage_gb = 40
+            rc, flavor_id = self.do_create_flavor(account, flavor)
+
+        for i in range(2):
+            image = RwcalYang.ImageInfoItem()
+            image.name = "rwimage"
+            image.id = self.get_uuid('image-'+str(i))
+            image.checksum = self.get_uuid('rwimage'+str(i))
+            image.location = "/dev/null"
+            rc, image_id = self.do_create_image(account, image)
+
+        image = RwcalYang.ImageInfoItem()
+        image.name = "Fedora-x86_64-20-20131211.1-sda.qcow2"
+        image.id = self.get_uuid(image.name)
+        image.checksum = self.get_uuid(image.name)
+        image.location = "/dev/null"
+        rc, image_id = self.do_create_image(account, image)
+
+        image = RwcalYang.ImageInfoItem()
+        image.name = "Fedora-x86_64-20-20131211.1-sda-ping.qcow2"
+        image.id = self.get_uuid(image.name)
+        image.checksum = self.get_uuid(image.name)
+        image.location = "/dev/null"
+        rc, image_id = self.do_create_image(account, image)
+
+        image = RwcalYang.ImageInfoItem()
+        image.name = "Fedora-x86_64-20-20131211.1-sda-pong.qcow2"
+        image.id = self.get_uuid(image.name)
+        image.checksum = self.get_uuid(image.name)
+        image.location = "/dev/null"
+        rc, image_id = self.do_create_image(account, image)
+
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        vlink_id = self.get_uuid(link_params.name)
+        vlink = RwcalYang.VirtualLinkInfoParams()
+        vlink.name = link_params.name
+        vlink.state = 'active'
+        vlink.virtual_link_id = vlink_id
+        vlink.subnet = link_params.subnet
+        vlink.connection_points = []
+        for field in link_params.provider_network.fields:
+            if link_params.provider_network.has_field(field):
+                setattr(vlink.provider_network, field, getattr(link_params.provider_network, field))
+
+        self.resources[account.name].vlinks[vlink_id] = vlink
+        logger.debug('created virtual-link: {}'.format(vlink_id))
+        return vlink_id
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        self.resources[account.name].vlinks.pop(link_id)
+        logger.debug('deleted virtual-link: {}'.format(link_id))
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        vlink = self.resources[account.name].vlinks[link_id]
+        logger.debug('Returning virtual-link-info for : {}'.format(link_id))
+        return vlink
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_virtual_link_list(self, account):
+        vnf_resources = RwcalYang.VNFResources()
+        for r in self.resources[account.name].vlinks.values():
+            vlink = RwcalYang.VirtualLinkInfoParams()
+            vlink.copy_from(r)
+            vnf_resources.virtual_link_info_list.append(vlink)
+        logger.debug("Returning list of virtual-link-info of size: %d", len(vnf_resources.virtual_link_info_list))
+        return vnf_resources
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        vdu_id = self.get_uuid(vdu_init.name)
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.vdu_id = vdu_id
+        vdu.name = vdu_init.name
+        vdu.node_id = vdu_init.node_id
+        vdu.image_id = vdu_init.image_id
+        if vdu_init.has_field('flavor_id'):
+            vdu.flavor_id = vdu_init.flavor_id
+
+        if vdu_init.has_field('vm_flavor'):
+            xx = vdu.vm_flavor.new()
+            xx.from_pbuf(vdu_init.vm_flavor.to_pbuf())
+            vdu.vm_flavor = xx
+
+        if vdu_init.has_field('guest_epa'):
+            xx = vdu.guest_epa.new()
+            xx.from_pbuf(vdu_init.guest_epa.to_pbuf())
+            vdu.guest_epa = xx
+
+        if vdu_init.has_field('vswitch_epa'):
+            xx = vdu.vswitch_epa.new()
+            xx.from_pbuf(vdu_init.vswitch_epa.to_pbuf())
+            vdu.vswitch_epa = xx
+
+        if vdu_init.has_field('hypervisor_epa'):
+            xx = vdu.hypervisor_epa.new()
+            xx.from_pbuf(vdu_init.hypervisor_epa.to_pbuf())
+            vdu.hypervisor_epa = xx
+
+        if vdu_init.has_field('host_epa'):
+            xx = vdu.host_epa.new()
+            xx.from_pbuf(vdu_init.host_epa.to_pbuf())
+            vdu.host_epa = xx
+
+        vdu.state = 'active'
+        vdu.management_ip = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
+        vdu.public_ip = vdu.management_ip
+
+        for c in vdu_init.connection_points:
+            p = vdu.connection_points.add()
+            p.connection_point_id = self.get_uuid(c.name)
+            p.name = c.name
+            p.vdu_id = vdu_id
+            p.state = 'active'
+            p.ip_address = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
+            p.virtual_link_id = c.virtual_link_id
+            # Need to add this connection_point to virtual link
+            vlink = self.resources[account.name].vlinks[c.virtual_link_id]
+            v = vlink.connection_points.add()
+            for field in p.fields:
+                if p.has_field(field):
+                    setattr(v, field, getattr(p, field))
+
+        self.resources[account.name].vdus[vdu_id] = vdu
+        logger.debug('Created vdu: {}'.format(vdu_id))
+        return vdu_id
+
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        vdu = self.resources[account.name].vdus[vdu_modify.vdu_id]
+        for c in vdu_modify.connection_points_add:
+            p = vdu.connection_points.add()
+            p.connection_point_id = self.get_uuid(c.name)
+            p.name = c.name
+            p.vdu_id = vdu.vdu_id
+            p.state = 'active'
+            p.ip_address = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
+            p.virtual_link_id = c.virtual_link_id
+            # Need to add this connection_point to virtual link
+            vlink = self.resources[account.name].vlinks[c.virtual_link_id]
+            aa = RwcalYang.VirtualLinkInfoParams_ConnectionPoints()
+            aa.connection_point_id = p.connection_point_id
+            aa.name = p.name
+            aa.virtual_link_id = vlink.virtual_link_id
+            aa.state = 'active'
+            aa.ip_address = p.ip_address
+            aa.vdu_id = p.vdu_id
+            vlink.connection_points.append(aa)
+
+        for c in vdu_modify.connection_points_remove:
+            for d in vdu.connection_points:
+                if c.connection_point_id == d.connection_point_id:
+                    vdu.connection_points.remove(d)
+                    break
+            for k, vlink in self.resources[account.name].vlinks.items():
+                for z in vlink.connection_points:
+                    if z.connection_point_id == c.connection_point_id:
+                        vlink.connection_points.remove(z)
+                        break
+        logger.debug('modified vdu: {}'.format(vdu_modify.vdu_id))
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        vdu = self.resources[account.name].vdus.pop(vdu_id)
+        for c in vdu.connection_points:
+            vlink = self.resources[account.name].vlinks[c.virtual_link_id]
+            z = [p for p in vlink.connection_points if p.connection_point_id == c.connection_point_id]
+            assert len(z) == 1
+            vlink.connection_points.remove(z[0])
+
+        logger.debug('deleted vdu: {}'.format(vdu_id))
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        vdu = self.resources[account.name].vdus[vdu_id]
+        logger.debug('Returning vdu-info for : {}'.format(vdu_id))
+        return vdu.copy()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_vdu_list(self, account):
+        vnf_resources = RwcalYang.VNFResources()
+        for r in self.resources[account.name].vdus.values():
+            vdu = RwcalYang.VDUInfoParams()
+            vdu.copy_from(r)
+            vnf_resources.vdu_info_list.append(vdu)
+        logger.debug("Returning list of vdu-info of size: %d", len(vnf_resources.vdu_info_list))
+        return vnf_resources
+
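+
+if __name__ == "__main__":
+    # Minimal usage sketch, illustrative only: it exercises the mock plugin the
+    # same way create_default_resources() does above and assumes the RIFT GI
+    # bindings imported at the top of this file are available.
+    plugin = MockPlugin()
+
+    account = RwcalYang.CloudAccount()
+    account.name = 'mock_account'
+    account.account_type = 'mock'
+    account.mock.username = 'mock_user'
+
+    vlink = RwcalYang.VirtualLinkReqParams()
+    vlink.name = 'link-demo'
+    vlink.subnet = '10.0.0.0/24'
+    rc, vlink_id = plugin.do_create_virtual_link(account, vlink)
+
+    vdu = RwcalYang.VDUInitParams()
+    vdu.name = 'vdu-demo'
+    vdu.image_id = plugin.get_uuid('image-demo')
+    cp = vdu.connection_points.add()
+    cp.name = 'vdu-demo-port-0'
+    cp.virtual_link_id = vlink_id
+    rc, vdu_id = plugin.do_create_vdu(account, vdu)
+
+    rc, vdu_list = plugin.do_get_vdu_list(account)
+    print("Mock CAL reports %d VDU(s)" % len(vdu_list.vdu_info_list))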
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_openmano/CMakeLists.txt b/modules/core/rwvx/rwcal/plugins/vala/rwcal_openmano/CMakeLists.txt
new file mode 100644 (file)
index 0000000..3f58d6e
--- /dev/null
@@ -0,0 +1,8 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwcal_openmano rwcal_openmano.py)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_openmano/Makefile b/modules/core/rwvx/rwcal/plugins/vala/rwcal_openmano/Makefile
new file mode 100644 (file)
index 0000000..345c5f3
--- /dev/null
@@ -0,0 +1,24 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_openmano/rwcal_openmano.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_openmano/rwcal_openmano.py
new file mode 100644 (file)
index 0000000..7cce731
--- /dev/null
@@ -0,0 +1,238 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import logging
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.openmano')
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+class ImageLocationError(Exception):
+    pass
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class RwcalOpenmanoPlugin(GObject.Object, RwCal.Cloud):
+    """Stub implementation of the CAL VALA methods for Openmano."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rwcal.openmano",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If creds are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus(
+                status="success",
+                details=""
+                )
+        print("Returning status: %s" % str(status))
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        logger.warning("Creating image on openmano not supported")
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        return RwcalYang.VimResources()
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        return RwcalYang.VimResources()
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        return RwcalYang.VimResources()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_virtual_link_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_vdu_list(self, account):
+        raise NotImplementedError()
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt b/modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt
new file mode 100644 (file)
index 0000000..912a0b3
--- /dev/null
@@ -0,0 +1,23 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+include(rift_plugin)
+
+### rwcal-openstack package
+set(PKG_NAME rwcal-openstack)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+rift_install_python_plugin(rwcal_openstack rwcal_openstack.py)
+
+rift_python_install_tree(
+  FILES
+    rift/rwcal/openstack/__init__.py
+    rift/rwcal/openstack/openstack_drv.py
+    rift/rwcal/openstack/prepare_vm.py
+  PYTHON3_ONLY
+  COMPONENT ${PKG_LONG_NAME})
+
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/Makefile b/modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/Makefile
new file mode 100644 (file)
index 0000000..345c5f3
--- /dev/null
@@ -0,0 +1,24 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py
new file mode 100644 (file)
index 0000000..9e7ec9d
--- /dev/null
@@ -0,0 +1 @@
+from .openstack_drv import OpenstackDriver
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
new file mode 100644 (file)
index 0000000..e0efba6
--- /dev/null
@@ -0,0 +1,1686 @@
+#!/usr/bin/python
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import json
+import logging
+
+from keystoneclient import v3 as ksclientv3
+from keystoneclient.v2_0 import client as ksclientv2
+from novaclient.v2 import client as nova_client
+from neutronclient.neutron import client as ntclient
+from glanceclient.v2 import client as glclient
+from ceilometerclient import client as ceilo_client
+
+# Exceptions
+import novaclient.exceptions as NovaException
+import keystoneclient.exceptions as KeystoneExceptions
+import neutronclient.common.exceptions as NeutronException
+import glanceclient.exc as GlanceException
+
+logger = logging.getLogger('rwcal.openstack.drv')
+logger.setLevel(logging.DEBUG)
+
+class ValidationError(Exception):
+    pass
+
+
+class KeystoneDriver(object):
+    """
+    Driver base-class for keystoneclient APIs
+    """
+    def __init__(self, ksclient):
+        """
+        Constructor for KeystoneDriver base class
+        Arguments: None
+        Returns: None
+        """
+        self.ksclient = ksclient
+
+    def get_username(self):
+        """
+        Returns the username associated with keystoneclient connection
+        """
+        return self._username
+
+    def get_password(self):
+        """
+        Returns the password associated with keystoneclient connection
+        """
+        return self._password
+
+    def get_tenant_name(self):
+        """
+        Returns the tenant name associated with keystoneclient connection
+        """
+        return self._tenant_name
+
+    def _get_keystone_connection(self):
+        """
+        Returns object of class python-keystoneclient class
+        """
+        if not hasattr(self, '_keystone_connection'):
+            self._keystone_connection = self.ksclient(**self._get_keystone_credentials())
+        return self._keystone_connection
+
+    def is_auth_token_valid(self, token_expiry, time_fmt):
+        """
+        Performs a validity check on auth_token
+        Arguments:
+          token_expiry (string): Expiry time for token
+          time_fmt (string)    : Format for expiry string in auth_ref
+
+        Returns:
+        True/False (Boolean):  (auth_token is valid or auth_token is invalid)
+        """
+        import time
+        import datetime
+        now = datetime.datetime.timetuple(datetime.datetime.utcnow())
+        expires_at = time.strptime(token_expiry, time_fmt)
+        t_now = time.mktime(now)
+        t_expiry = time.mktime(expires_at)
+
+        if (t_expiry <= t_now) or ((t_expiry - t_now) < 300):
+            ### Token has expired or is about to expire (within 5 minutes)
+            delattr(self, '_keystone_connection')
+            return False
+        else:
+            return True
+
+    def get_service_endpoint(self, service_type, endpoint_type):
+        """
+        Returns requested type of endpoint for requested service type
+        Arguments:
+          service_type (string): Service Type (e.g. computev3, image, network)
+          endpoint_type(string): Endpoint Type (e.g. publicURL,adminURL,internalURL)
+        Returns:
+          service_endpoint(string): Service endpoint string
+        """
+        endpoint_kwargs   = {'service_type'  : service_type,
+                             'endpoint_type' : endpoint_type}
+        try:
+            ksconn = self._get_keystone_connection()
+            service_endpoint  = ksconn.service_catalog.url_for(**endpoint_kwargs)
+        except Exception as e:
+            logger.error("OpenstackDriver: Service Catalog discovery operation failed for service_type: %s, endpoint_type: %s. Exception: %s" %(service_type, endpoint_type, str(e)))
+            raise
+        return service_endpoint
+
+
+    def get_raw_token(self):
+        """
+        Returns a valid raw_auth_token string
+
+        Returns (string): raw_auth_token string
+        """
+        ksconn = self._get_keystone_connection()
+        try:
+            raw_token = ksconn.get_raw_token_from_identity_service(auth_url = self._auth_url,
+                                                                   token    = self.get_auth_token())
+        except KeystoneExceptions.AuthorizationFailure as e:
+            logger.error("OpenstackDriver: get_raw_token_from_identity_service Failure. Exception: %s" %(str(e)))
+            return None
+
+        except Exception as e:
+            logger.error("OpenstackDriver: Could not retrieve raw_token. Exception: %s" %(str(e)))
+            return None
+
+        return raw_token
+
+    def get_tenant_id(self):
+        """
+        Returns tenant_id for the project/tenant. Tenant name is provided during
+        class instantiation
+
+        Returns (string): Tenant ID
+        """
+        ksconn = self._get_keystone_connection()
+        return ksconn.tenant_id
+
+    def tenant_list(self):
+        """
+        Returns list of tenants
+        """
+        pass
+
+    def tenant_create(self, name):
+        """
+        Create a new tenant
+        """
+        pass
+
+    def tenant_delete(self, tenant_id):
+        """
+        Deletes a tenant identified by tenant_id
+        """
+        pass
+
+    def roles_list(self):
+        pass
+
+    def roles_create(self):
+        pass
+
+    def roles_delete(self):
+        pass
+
+class KeystoneDriverV2(KeystoneDriver):
+    """
+    Driver class for keystoneclient V2 APIs
+    """
+    def __init__(self, username, password, auth_url, tenant_name):
+        """
+        Constructor for KeystoneDriverV2 class
+        Arguments:
+        username (string)  : Username
+        password (string)  : Password
+        auth_url (string)  : Authentication URL
+        tenant_name(string): Tenant Name
+
+        Returns: None
+        """
+        self._username = username
+        self._password = password
+        self._auth_url = auth_url
+        self._tenant_name = tenant_name
+        super(KeystoneDriverV2, self).__init__(ksclientv2.Client)
+
+    def _get_keystone_credentials(self):
+        """
+        Returns the dictionary of kwargs required to instantiate python-keystoneclient class
+        """
+        creds                 = {}
+        #creds['user_domain'] = self._domain_name
+        creds['username']     = self._username
+        creds['password']     = self._password
+        creds['auth_url']     = self._auth_url
+        creds['tenant_name']  = self._tenant_name
+        return creds
+
+    def get_auth_token(self):
+        """
+        Returns a valid auth_token
+
+        Returns (string): auth_token string
+        """
+        ksconn = self._get_keystone_connection()
+        return ksconn.auth_token
+
+    def is_auth_token_valid(self):
+        """
+        Performs a validity check on auth_token
+        Arguments:
+
+        Returns:
+        True/False (Boolean):  (auth_token is valid or auth_token is invalid)
+        """
+        ksconn = self._get_keystone_connection()
+        result = super(KeystoneDriverV2, self).is_auth_token_valid(ksconn.auth_ref['token']['expires'],
+                                                                   "%Y-%m-%dT%H:%M:%SZ")
+        return result
+
+
+class KeystoneDriverV3(KeystoneDriver):
+    """
+    Driver class for keystoneclient V3 APIs
+    """
+    def __init__(self, username, password, auth_url, tenant_name):
+        """
+        Constructor for KeystoneDriverV3 class
+        Arguments:
+        username (string)  : Username
+        password (string)  : Password
+        auth_url (string)  : Authentication URL
+        tenant_name(string): Tenant Name
+
+        Returns: None
+        """
+        self._username = username
+        self._password = password
+        self._auth_url = auth_url
+        self._tenant_name = tenant_name
+        super(KeystoneDriverV3, self).__init__(ksclientv3.Client)
+
+    def _get_keystone_credentials(self):
+        """
+        Returns the dictionary of kwargs required to instantiate python-keystoneclient class
+        """
+        creds = {}
+        #creds['user_domain']      = self._domain_name
+        creds['username']         = self._username
+        creds['password']         = self._password
+        creds['auth_url']         = self._auth_url
+        creds['project_name']     = self._tenant_name
+        return creds
+
+    def get_auth_token(self):
+        """
+        Returns a valid auth_token
+
+        Returns (string): auth_token string
+        """
+        ksconn = self._get_keystone_connection()
+        return ksconn.auth_ref['auth_token']
+
+    def is_auth_token_valid(self):
+        """
+        Performs a validity check on auth_token
+        Arguments:
+
+        Returns:
+        True/False (Boolean):  (auth_token is valid or auth_token is invalid)
+        """
+        ksconn = self._get_keystone_connection()
+        result = super(KeystoneDriverV3, self).is_auth_token_valid(ksconn.auth_ref['expires_at'],
+                                                                   "%Y-%m-%dT%H:%M:%S.%fZ")
+        return result
+
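+# Illustrative sketch (comments only) of how these Keystone driver classes are
+# composed with the service drivers below; the concrete service names and
+# version strings here are placeholders, and the real wiring is presumably done
+# by the OpenstackDriver class defined later in this module:
+#
+#   ks_drv = KeystoneDriverV3('admin', 'secret', 'http://keystone:5000/v3', 'demo')
+#   token = ks_drv.get_auth_token()
+#   nova_endpoint = ks_drv.get_service_endpoint('compute', 'publicURL')
+#   nova_drv = NovaDriver(ks_drv, 'compute', '2.0')
+#   flavors = nova_drv.flavor_list()
+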
+class NovaDriver(object):
+    """
+    Driver for openstack nova_client
+    """
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for NovaDriver
+        Arguments:
+          ks_drv (KeystoneDriver) : KeystoneDriver class object
+          service_name (string)   : Name of the compute service
+          version (string)        : Version of the nova client to use
+        """
+        self.ks_drv = ks_drv
+        self._service_name = service_name
+        self._version = version
+
+    def _get_nova_credentials(self):
+        """
+        Returns a dictionary of kwargs required to instantiate python-novaclient class
+        """
+        creds = {}
+        creds['version']     = self._version
+        creds['bypass_url']  = self.ks_drv.get_service_endpoint(self._service_name, "publicURL")
+        creds['username']    = self.ks_drv.get_username()
+        creds['project_id']  = self.ks_drv.get_tenant_name()
+        creds['auth_token']  = self.ks_drv.get_auth_token()
+        return creds
+
+    def _get_nova_connection(self):
+        """
+        Returns an object of class python-novaclient
+        """
+        if not hasattr(self, '_nova_connection'):
+            self._nova_connection = nova_client.Client(**self._get_nova_credentials())
+        else:
+            # Reinitialize if auth_token is no longer valid
+            if not self.ks_drv.is_auth_token_valid():
+                self._nova_connection = nova_client.Client(**self._get_nova_credentials())
+        return self._nova_connection
+
+    def _flavor_get(self, flavor_id):
+        """
+        Get flavor by flavor_id
+        Arguments:
+           flavor_id(string): UUID of flavor_id
+
+        Returns:
+        dictionary of flavor parameters
+        """
+        nvconn = self._get_nova_connection()
+        try:
+            flavor = nvconn.flavors.get(flavor_id)
+        except Exception as e:
+            logger.info("OpenstackDriver: Did not find flavor with flavor_id : %s. Exception: %s"%(flavor_id, str(e)))
+            raise
+
+        try:
+            extra_specs = flavor.get_keys()
+        except Exception as e:
+            logger.info("OpenstackDriver: Could not get the EPA attributes for flavor with flavor_id : %s. Exception: %s"%(flavor_id, str(e)))
+            raise
+
+        response = flavor.to_dict()
+        assert 'extra_specs' not in response, "Key extra_specs present as flavor attribute"
+        response['extra_specs'] = extra_specs
+        return response
+
+    def flavor_get(self, flavor_id):
+        """
+        Get flavor by flavor_id
+        Arguments:
+           flavor_id(string): UUID of flavor_id
+
+        Returns:
+        dictionary of flavor parameters
+        """
+        return self._flavor_get(flavor_id)
+
+    def flavor_list(self):
+        """
+        Returns list of all flavors (dictionary per flavor)
+
+        Arguments:
+           None
+        Returns:
+           A list of dictionaries. Each dictionary contains attributes for a single flavor instance
+        """
+        flavors = []
+        flavor_info = []
+        nvconn =  self._get_nova_connection()
+        try:
+            flavors = nvconn.flavors.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: List Flavor operation failed. Exception: %s"%(str(e)))
+            raise
+        if flavors:
+            flavor_info = [ self.flavor_get(flv.id) for flv in flavors ]
+        return flavor_info
+
+    def flavor_create(self, name, ram, vcpu, disk, extra_specs):
+        """
+        Create a new flavor
+
+        Arguments:
+           name   (string):  Name of the new flavor
+           ram    (int)   :  Memory in MB
+           vcpu   (int)   :  Number of VCPUs
+           disk   (int)   :  Secondary storage size in GB
+           extra_specs (dictionary): EPA attributes dictionary
+
+        Returns:
+           flavor_id (string): UUID of flavor created
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            flavor = nvconn.flavors.create(name        = name,
+                                           ram         = ram,
+                                           vcpus       = vcpu,
+                                           disk        = disk,
+                                           flavorid    = 'auto',
+                                           ephemeral   = 0,
+                                           swap        = 0,
+                                           rxtx_factor = 1.0,
+                                           is_public    = True)
+        except Exception as e:
+            logger.error("OpenstackDriver: Create Flavor operation failed. Exception: %s"%(str(e)))
+            raise
+
+        if extra_specs:
+            try:
+                flavor.set_keys(extra_specs)
+            except Exception as e:
+                logger.error("OpenstackDriver: Set Key operation failed for flavor: %s. Exception: %s" %(flavor.id, str(e)))
+                raise
+        return flavor.id
+
+    def flavor_delete(self, flavor_id):
+        """
+        Deletes a flavor identified by flavor_id
+
+        Arguments:
+           flavor_id (string):  UUID of flavor to be deleted
+
+        Returns: None
+        """
+        assert flavor_id == self._flavor_get(flavor_id)['id']
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.flavors.delete(flavor_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete flavor operation failed for flavor: %s. Exception: %s" %(flavor_id, str(e)))
+            raise
+
+
+    def server_list(self):
+        """
+        Returns a list of available VMs for the project
+
+        Arguments: None
+
+        Returns:
+           A list of dictionaries. Each dictionary contains attributes associated
+           with individual VM
+        """
+        servers     = []
+        server_info = []
+        nvconn      = self._get_nova_connection()
+        try:
+            servers     = nvconn.servers.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: List Server operation failed. Exception: %s" %(str(e)))
+            raise
+        server_info = [ server.to_dict() for server in servers]
+        return server_info
+
+    def _nova_server_get(self, server_id):
+        """
+        Returns a dictionary of attributes associated with the VM identified by server_id
+
+        Arguments:
+          server_id (string): UUID of the VM/server for which information is requested
+
+        Returns:
+          A dictionary object with attributes associated with VM identified by server_id
+        """
+        nvconn = self._get_nova_connection()
+        try:
+            server = nvconn.servers.get(server = server_id)
+        except Exception as e:
+            logger.info("OpenstackDriver: Get Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+        else:
+            return server.to_dict()
+
+    def server_get(self, server_id):
+        """
+        Returns a dictionary of attributes associated with the VM identified by server_id
+
+        Arguments:
+          server_id (string): UUID of the VM/server for which information is requested
+
+        Returns:
+          A dictionary object with attributes associated with VM identified by server_id
+        """
+        return self._nova_server_get(server_id)
+
+    def server_create(self, **kwargs):
+        """
+        Creates a new VM/server instance
+
+        Arguments:
+          A dictionary of following key-value pairs
+         {
+           name (string)           : Name of the VM/Server
+           flavor_id  (string)     : UUID of the flavor to be used for the VM
+           image_id   (string)     : UUID of the image to be used for the VM/Server instance
+           network_list(List)      : A List of network_ids. A port will be created in these networks
+           port_list (List)        : A List of port-ids. These ports will be added to VM.
+           metadata   (dict)       : A dictionary of arbitrary key-value pairs associated with VM/server
+           userdata   (string)     : A script which shall be executed during first boot of the VM
+         }
+        Returns:
+          server_id (string): UUID of the VM/server created
+
+        """
+        nics = []
+        if 'network_list' in kwargs:
+            for network_id in kwargs['network_list']:
+                nics.append({'net-id': network_id})
+
+        if 'port_list' in kwargs:
+            for port_id in kwargs['port_list']:
+                nics.append({'port-id': port_id})
+
+        nvconn = self._get_nova_connection()
+
+        try:
+            server = nvconn.servers.create(kwargs['name'],
+                                           kwargs['image_id'],
+                                           kwargs['flavor_id'],
+                                           meta                 = kwargs['metadata'],
+                                           files                = None,
+                                           reservation_id       = None,
+                                           min_count            = None,
+                                           max_count            = None,
+                                           userdata             = kwargs['userdata'],
+                                           security_groups      = kwargs['security_groups'],
+                                           availability_zone    = None,
+                                           block_device_mapping = None,
+                                           nics                 = nics,
+                                           scheduler_hints      = None,
+                                           config_drive         = None)
+        except Exception as e:
+            logger.info("OpenstackDriver: Create Server operation failed. Exception: %s" %(str(e)))
+            raise
+        return server.to_dict()['id']
+
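+    # Illustrative sketch of the kwargs expected by server_create(); UUIDs are
+    # placeholders. 'name', 'image_id', 'flavor_id', 'metadata', 'userdata' and
+    # 'security_groups' are read unconditionally, while 'network_list' and
+    # 'port_list' are optional.
+    #
+    #   server_id = drv.server_create(name='test-vm',
+    #                                 image_id='<image-uuid>',
+    #                                 flavor_id='<flavor-uuid>',
+    #                                 metadata={},
+    #                                 userdata=None,
+    #                                 security_groups=None,
+    #                                 network_list=['<network-uuid>'])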
+    def server_delete(self, server_id):
+        """
+        Deletes a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be deleted
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.delete(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_start(self, server_id):
+        """
+        Starts a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be started
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.start(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Start Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_stop(self, server_id):
+        """
+        Stops a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be stopped
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.stop(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Stop Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_pause(self, server_id):
+        """
+        Pauses a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be paused
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.pause(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Pause Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_unpause(self, server_id):
+        """
+        Unpauses a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be unpaused
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.unpause(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Resume Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+
+    def server_suspend(self, server_id):
+        """
+        Suspends a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be suspended
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.suspend(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Suspend Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+
+
+    def server_resume(self, server_id):
+        """
+        Resumes a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be resumed
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.resume(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Resume Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_reboot(self, server_id, reboot_type):
+        """
+        Reboots a server identified by server_id
+
+        Arguments:
+           server_id (string) : UUID of the server to be rebooted
+           reboot_type(string):
+                         'SOFT': Soft Reboot
+                         'HARD': Hard Reboot
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.reboot(server_id, reboot_type)
+        except Exception as e:
+            logger.error("OpenstackDriver: Reboot Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_rebuild(self, server_id, image_id):
+        """
+        Rebuilds a server identified by server_id using the specified image
+
+        Arguments:
+           server_id (string) : UUID of the server to be rebuilt
+           image_id (string)  : UUID of the image to use
+        Returns: None
+        """
+
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.rebuild(server_id, image_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Rebuild Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+
+    def server_add_port(self, server_id, port_id):
+        """
+        Attaches a port to the server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server
+           port_id   (string): UUID of the port to be attached
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.interface_attach(server_id,
+                                            port_id,
+                                            net_id = None,
+                                            fixed_ip = None)
+        except Exception as e:
+            logger.error("OpenstackDriver: Server Port Add operation failed for server_id : %s, port_id : %s. Exception: %s" %(server_id, port_id, str(e)))
+            raise
+
+    def server_delete_port(self, server_id, port_id):
+        """
+        Detaches a port from the server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server
+           port_id   (string): UUID of the port to be deleted
+        Returns: None
+
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.interface_detach(server_id, port_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Server Port Delete operation failed for server_id : %s, port_id : %s. Exception: %s" %(server_id, port_id, str(e)))
+            raise
+
+    def floating_ip_list(self):
+        """
+        Arguments:
+            None
+        Returns:
+            List of objects of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        """
+        nvconn =  self._get_nova_connection()
+        return nvconn.floating_ips.list()
+
+    def floating_ip_create(self, pool):
+        """
+        Arguments:
+           pool (string): Name of the pool (optional)
+        Returns:
+           An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            floating_ip = nvconn.floating_ips.create(pool)
+        except Exception as e:
+            logger.error("OpenstackDriver: Floating IP Create operation failed. Exception: %s"  %str(e))
+            raise
+
+        return floating_ip
+
+    def floating_ip_delete(self, floating_ip):
+        """
+        Arguments:
+           floating_ip: An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        Returns:
+           None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            floating_ip = nvconn.floating_ips.delete(floating_ip)
+        except Exception as e:
+            logger.error("OpenstackDriver: Floating IP Delete operation failed. Exception: %s"  %str(e))
+            raise
+
+    def floating_ip_assign(self, server_id, floating_ip, fixed_ip):
+        """
+        Arguments:
+           server_id (string)  : UUID of the server
+           floating_ip (string): IP address string for floating-ip
+           fixed_ip (string)   : IP address string for the fixed-ip with which floating ip will be associated
+        Returns:
+           None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.add_floating_ip(server_id, floating_ip, fixed_ip)
+        except Exception as e:
+            logger.error("OpenstackDriver: Assign Floating IP operation failed. Exception: %s"  %str(e))
+            raise
+
+    def floating_ip_release(self, server_id, floating_ip):
+        """
+        Arguments:
+           server_id (string)  : UUID of the server
+           floating_ip (string): IP address string for floating-ip
+        Returns:
+           None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.remove_floating_ip(server_id, floating_ip)
+        except Exception as e:
+            logger.error("OpenstackDriver: Release Floating IP operation failed. Exception: %s"  %str(e))
+            raise
+
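+    # Illustrative floating-IP workflow sketch (placeholder values): allocate
+    # an address, associate it with a server's fixed IP, then release and
+    # delete it when no longer needed.
+    #
+    #   fip = drv.floating_ip_create(pool=None)
+    #   drv.floating_ip_assign('<server-uuid>', fip.ip, '<fixed-ip>')
+    #   drv.floating_ip_release('<server-uuid>', fip.ip)
+    #   drv.floating_ip_delete(fip)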
+
+
+
+class NovaDriverV2(NovaDriver):
+    """
+    Driver class for novaclient V2 APIs
+    """
+    def __init__(self, ks_drv):
+        """
+        Constructor for NovaDriver
+        Arguments: KeystoneDriver class object
+        """
+        super(NovaDriverV2, self).__init__(ks_drv, 'compute', '2')
+
+class NovaDriverV21(NovaDriver):
+    """
+    Driver class for novaclient V2.1 APIs
+    """
+    def __init__(self, ks_drv):
+        """
+        Constructor for NovaDriver
+        Arguments: KeystoneDriver class object
+        """
+        super(NovaDriverV21, self).__init__(ks_drv, 'computev21', '3')
+
+class GlanceDriver(object):
+    """
+    Driver for openstack glance-client
+    """
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for GlanceDriver
+        Arguments: KeystoneDriver class object
+        """
+        self.ks_drv = ks_drv
+        self._service_name = service_name
+        self._version = version
+
+    def _get_glance_credentials(self):
+        """
+        Returns a dictionary of kwargs required to instantiate python-glanceclient class
+
+        Arguments: None
+
+        Returns:
+           A dictionary object of arguments
+        """
+        creds  =  {}
+        creds['version']  = self._version
+        creds['endpoint'] = self.ks_drv.get_service_endpoint(self._service_name, 'publicURL')
+        creds['token']    = self.ks_drv.get_auth_token()
+        return creds
+
+    def _get_glance_connection(self):
+        """
+        Returns an object of class python-glanceclient
+        """
+        if not hasattr(self, '_glance_connection'):
+            self._glance_connection = glclient.Client(**self._get_glance_credentials())
+        else:
+            # Reinitialize if auth_token is no longer valid
+            if not self.ks_drv.is_auth_token_valid():
+                self._glance_connection = glclient.Client(**self._get_glance_credentials())
+        return self._glance_connection
+
+    def image_list(self):
+        """
+        Returns list of dictionaries. Each dictionary contains attributes associated with
+        image
+
+        Arguments: None
+
+        Returns: List of dictionaries.
+        """
+        glconn = self._get_glance_connection()
+        images = []
+        try:
+            image_info = glconn.images.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: List Image operation failed. Exception: %s" %(str(e)))
+            raise
+        images = [ img for img in image_info ]
+        return images
+
+    def image_create(self, **kwargs):
+        """
+        Creates an image
+        Arguments:
+           A dictionary of kwargs with following keys
+           {
+              'name'(string)         : Name of the image
+              'location'(string)     : URL (http://....) where image is located
+              'disk_format'(string)  : Disk format
+                    Possible values are 'ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'
+              'container_format'(string): Container format
+                                       Possible values are 'ami', 'ari', 'aki', 'bare', 'ovf'
+              'tags'                 : A list of user tags
+           }
+        Returns:
+           image_id (string)  : UUID of the image
+
+        """
+        glconn = self._get_glance_connection()
+        try:
+            image = glconn.images.create(**kwargs)
+        except Exception as e:
+            logger.error("OpenstackDriver: Create Image operation failed. Exception: %s" %(str(e)))
+            raise
+
+        return image.id
+
+    def image_upload(self, image_id, fd):
+        """
+        Upload the image
+
+        Arguments:
+            image_id: UUID of the image
+            fd      : File descriptor for the image file
+        Returns: None
+        """
+        glconn = self._get_glance_connection()
+        try:
+            glconn.images.upload(image_id, fd)
+        except Exception as e:
+            logger.error("OpenstackDriver: Image upload operation failed. Exception: %s" %(str(e)))
+            raise
+
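+    # Illustrative image create/upload flow (placeholder name and path);
+    # image_create() returns the new image's UUID and image_upload() streams
+    # the opened file to glance.
+    #
+    #   image_id = drv.image_create(name='rift-image',
+    #                               disk_format='qcow2',
+    #                               container_format='bare')
+    #   with open('/tmp/rift-image.qcow2', 'rb') as fd:
+    #       drv.image_upload(image_id, fd)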
+    def image_add_location(self, image_id, location, metadata):
+        """
+        Add image URL location
+
+        Arguments:
+           image_id : UUID of the image
+           location : http URL for the image
+           metadata : A dictionary of metadata to associate with this location
+
+        Returns: None
+        """
+        glconn = self._get_glance_connection()
+        try:
+            glconn.images.add_location(image_id, location, metadata)
+        except Exception as e:
+            logger.error("OpenstackDriver: Image location add operation failed. Exception: %s" %(str(e)))
+            raise
+
+    def image_update(self):
+        pass
+
+    def image_delete(self, image_id):
+        """
+        Delete an image
+
+        Arguments:
+           image_id: UUID of the image
+
+        Returns: None
+
+        """
+        assert image_id == self._image_get(image_id)['id']
+        glconn = self._get_glance_connection()
+        try:
+            glconn.images.delete(image_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete Image operation failed for image_id : %s. Exception: %s" %(image_id, str(e)))
+            raise
+
+
+    def _image_get(self, image_id):
+        """
+        Returns a dictionary object of VM image attributes
+
+        Arguments:
+           image_id (string): UUID of the image
+
+        Returns:
+           A dictionary of the image attributes
+        """
+        glconn = self._get_glance_connection()
+        try:
+            image = glconn.images.get(image_id)
+        except Exception as e:
+            logger.info("OpenstackDriver: Get Image operation failed for image_id : %s. Exception: %s" %(image_id, str(e)))
+            raise
+        return image
+
+    def image_get(self, image_id):
+        """
+        Returns a dictionary object of VM image attributes
+
+        Arguments:
+           image_id (string): UUID of the image
+
+        Returns:
+           A dictionary of the image attributes
+        """
+        return self._image_get(image_id)
+
+class GlanceDriverV2(GlanceDriver):
+    """
+    Driver for openstack glance-client V2
+    """
+    def __init__(self, ks_drv):
+        super(GlanceDriverV2, self).__init__(ks_drv, 'image', 2)
+
+class NeutronDriver(object):
+    """
+    Driver for openstack neutron-client
+    """
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for NeutronDriver
+        Arguments: KeystoneDriver class object
+        """
+        self.ks_drv = ks_drv
+        self._service_name = service_name
+        self._version = version
+
+    def _get_neutron_credentials(self):
+        """
+        Returns a dictionary of kwargs required to instantiate python-neutronclient class
+
+        Returns:
+          Dictionary of kwargs
+        """
+        creds = {}
+        creds['api_version']  = self._version
+        creds['endpoint_url'] = self.ks_drv.get_service_endpoint(self._service_name, 'publicURL')
+        creds['token']        = self.ks_drv.get_auth_token()
+        creds['tenant_name']  = self.ks_drv.get_tenant_name()
+        return creds
+
+    def _get_neutron_connection(self):
+        """
+        Returns an object of class python-neutronclient
+        """
+        if not hasattr(self, '_neutron_connection'):
+            self._neutron_connection = ntclient.Client(**self._get_neutron_credentials())
+        else:
+            # Reinitialize if auth_token is no longer valid
+            if not self.ks_drv.is_auth_token_valid():
+                self._neutron_connection = ntclient.Client(**self._get_neutron_credentials())
+        return self._neutron_connection
+
+    def network_list(self):
+        """
+        Returns list of dictionaries. Each dictionary contains the attributes for a network
+        under project
+
+        Arguments: None
+
+        Returns:
+          A list of dictionaries
+        """
+        networks = []
+        ntconn   = self._get_neutron_connection()
+        try:
+            networks = ntconn.list_networks()
+        except Exception as e:
+            logger.error("OpenstackDriver: List Network operation failed. Exception: %s" %(str(e)))
+            raise
+        return networks['networks']
+
+    def network_create(self, **kwargs):
+        """
+        Creates a new network for the project
+
+        Arguments:
+          A dictionary with following key-values
+        {
+          name (string)              : Name of the network
+          admin_state_up(Boolean)    : True/False (Defaults: True)
+          external_router(Boolean)   : Connectivity with external router. True/False (Defaults: False)
+          shared(Boolean)            : Shared among tenants. True/False (Defaults: False)
+          physical_network(string)   : The physical network where this network object is implemented (optional).
+          network_type               : The type of physical network that maps to this network resource (optional).
+                                       Possible values are: 'flat', 'vlan', 'vxlan', 'gre'
+          segmentation_id            : An isolated segment on the physical network. The network_type attribute
+                                       defines the segmentation model. For example, if the network_type value
+                                       is vlan, this ID is a vlan identifier. If the network_type value is gre,
+                                       this ID is a gre key.
+        }
+        Returns:
+           network_id (string): UUID of the created network
+        """
+        """
+        params = {'network':
+                  {'name'                 : kwargs['name'],
+                   'admin_state_up'       : kwargs['admin_state_up'],
+                   'tenant_id'            : self.ks_drv.get_tenant_id(),
+                   'shared'               : kwargs['shared'],
+                   #'port_security_enabled': port_security_enabled,
+                   'router:external'      : kwargs['external_router']}}
+
+        if 'physical_network' in kwargs:
+            params['network']['provider:physical_network'] = kwargs['physical_network']
+        if 'network_type' in kwargs:
+            params['network']['provider:network_type'] = kwargs['network_type']
+        if 'segmentation_id' in kwargs:
+            params['network']['provider:segmentation_id'] = kwargs['segmentation_id']
+
+        ntconn = self._get_neutron_connection()
+        try:
+            logger.debug("Calling neutron create_network() with params: %s", str(params))
+            net = ntconn.create_network(params)
+        except Exception as e:
+            logger.error("OpenstackDriver: Create Network operation failed. Exception: %s" %(str(e)))
+            raise
+        logger.debug("Got create_network response from neutron connection: %s", str(net))
+        network_id = net['network']['id']
+        if not network_id:
+            raise Exception("Empty network id returned from create_network. (params: %s)" % str(params))
+
+        return network_id
+
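+    # Illustrative network_create() call (placeholder values). 'name',
+    # 'admin_state_up', 'shared' and 'external_router' are read
+    # unconditionally; the provider-related keys are optional.
+    #
+    #   network_id = drv.network_create(name='test-net',
+    #                                   admin_state_up=True,
+    #                                   shared=False,
+    #                                   external_router=False,
+    #                                   network_type='vlan',
+    #                                   segmentation_id=100)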
+    def network_delete(self, network_id):
+        """
+        Deletes a network identified by network_id
+
+        Arguments:
+          network_id (string): UUID of the network
+
+        Returns: None
+        """
+        assert network_id == self._network_get(network_id)['id']
+        ntconn = self._get_neutron_connection()
+        try:
+            ntconn.delete_network(network_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete Network operation failed. Exception: %s" %(str(e)))
+            raise
+
+    def _network_get(self, network_id):
+        """
+        Returns a dictionary object describing the attributes of the network
+
+        Arguments:
+           network_id (string): UUID of the network
+
+        Returns:
+           A dictionary object of the network attributes
+        """
+        ntconn = self._get_neutron_connection()
+        network = ntconn.list_networks(id = network_id)['networks']
+        if not network:
+            raise NeutronException.NotFound("Network with id %s not found"%(network_id))
+
+        return network[0]
+
+    def network_get(self, network_id):
+        """
+        Returns a dictionary object describing the attributes of the network
+
+        Arguments:
+           network_id (string): UUID of the network
+
+        Returns:
+           A dictionary object of the network attributes
+        """
+        return self._network_get(network_id)
+
+    def subnet_create(self, network_id, cidr):
+        """
+        Creates a subnet on the network
+
+        Arguments:
+           network_id(string): UUID of the network where subnet needs to be created
+           cidr (string)     : IPv4 address prefix (e.g. '1.1.1.0/24') for the subnet
+
+        Returns:
+           subnet_id (string): UUID of the created subnet
+        """
+        params = {'subnets': [{'cidr': cidr,
+                               'ip_version': 4,
+                               'network_id': network_id,
+                               'gateway_ip': None}]}
+        ntconn = self._get_neutron_connection()
+        try:
+            subnet = ntconn.create_subnet(params)
+        except Exception as e:
+            logger.error("OpenstackDriver: Create Subnet operation failed. Exception: %s" %(str(e)))
+            raise
+
+        return subnet['subnets'][0]['id']
+
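+    # Illustrative subnet_create() call (placeholder UUID); the subnet is
+    # always created as IPv4 with no gateway, per the fixed params above.
+    #
+    #   subnet_id = drv.subnet_create('<network-uuid>', '10.0.0.0/24')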
+    def subnet_list(self):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing the subnet
+
+        Arguments: None
+
+        Returns:
+           A list of subnet dictionaries
+        """
+        ntconn = self._get_neutron_connection()
+        try:
+            subnets = ntconn.list_subnets()['subnets']
+        except Exception as e:
+            logger.error("OpenstackDriver: List Subnet operation failed. Exception: %s" %(str(e)))
+            raise
+        return subnets
+
+    def _subnet_get(self, subnet_id):
+        """
+        Returns a dictionary object describing the attributes of a subnet.
+
+        Arguments:
+           subnet_id (string): UUID of the subnet
+
+        Returns:
+           A dictionary object of the subnet attributes
+        """
+        ntconn = self._get_neutron_connection()
+        subnets = ntconn.list_subnets(id=subnet_id)
+        if not subnets['subnets']:
+            raise NeutronException.NotFound("Could not find subnet_id %s" %(subnet_id))
+        return subnets['subnets'][0]
+
+    def subnet_get(self, subnet_id):
+        """
+        Returns a dictionary object describing the attributes of a subnet.
+
+        Arguments:
+           subnet_id (string): UUID of the subnet
+
+        Returns:
+           A dictionary object of the subnet attributes
+        """
+        return self._subnet_get(subnet_id)
+
+    def subnet_delete(self, subnet_id):
+        """
+        Deletes a subnet identified by subnet_id
+
+        Arguments:
+           subnet_id (string): UUID of the subnet to be deleted
+
+        Returns: None
+        """
+        ntconn = self._get_neutron_connection()
+        assert subnet_id == self._subnet_get(subnet_id)['id']
+        try:
+            ntconn.delete_subnet(subnet_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete Subnet operation failed for subnet_id : %s. Exception: %s" %(subnet_id, str(e)))
+            raise
+
+    def port_list(self, **kwargs):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing the port
+
+        Arguments:
+            kwargs (dictionary): A dictionary for filters for port_list operation
+
+        Returns:
+           A list of port dictionaries
+
+        """
+        ports  = []
+        ntconn = self._get_neutron_connection()
+
+        kwargs['tenant_id'] = self.ks_drv.get_tenant_id()
+
+        try:
+            ports  = ntconn.list_ports(**kwargs)
+        except Exception as e:
+            logger.info("OpenstackDriver: List Port operation failed. Exception: %s" %(str(e)))
+            raise
+        return ports['ports']
+
+    def port_create(self, **kwargs):
+        """
+        Create a port in network
+
+        Arguments:
+           A dictionary of following
+           {
+              name (string)           : Name of the port
+              network_id(string)      : UUID of the network_id identifying the network to which port belongs
+              subnet_id(string)       : UUID of the subnet_id from which IP-address will be assigned to port
+              admin_state_up(Boolean) : Administrative state of the port (True/False)
+              port_type(string)       : Port binding vnic-type. Possible values are "normal", "direct", "macvtap"
+           }
+        Returns:
+           port_id (string)   : UUID of the port
+        """
+        params = {
+            "port": {
+                "admin_state_up"    : kwargs['admin_state_up'],
+                "name"              : kwargs['name'],
+                "network_id"        : kwargs['network_id'],
+                "fixed_ips"         : [ {"subnet_id": kwargs['subnet_id']}],
+                "binding:vnic_type" : kwargs['port_type']}}
+
+        ntconn = self._get_neutron_connection()
+        try:
+            port  = ntconn.create_port(params)
+        except Exception as e:
+            logger.error("OpenstackDriver: Port Create operation failed. Exception: %s" %(str(e)))
+            raise
+        return port['port']['id']
+
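+    # Illustrative port_create() call (placeholder UUIDs). 'admin_state_up',
+    # 'name', 'network_id', 'subnet_id' and 'port_type' are all read
+    # unconditionally and must be supplied.
+    #
+    #   port_id = drv.port_create(name='test-port',
+    #                             admin_state_up=True,
+    #                             network_id='<network-uuid>',
+    #                             subnet_id='<subnet-uuid>',
+    #                             port_type='normal')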
+    def _port_get(self, port_id):
+        """
+        Returns a dictionary object describing the attributes of the port
+
+        Arguments:
+           port_id (string): UUID of the port
+
+        Returns:
+           A dictionary object of the port attributes
+        """
+        ntconn = self._get_neutron_connection()
+        port   = ntconn.list_ports(id=port_id)['ports']
+        if not port:
+            raise NeutronException.NotFound("Could not find port_id %s" %(port_id))
+        return port[0]
+
+    def port_get(self, port_id):
+        """
+        Returns a dictionary object describing the attributes of the port
+
+        Arguments:
+           port_id (string): UUID of the port
+
+        Returns:
+           A dictionary object of the port attributes
+        """
+        return self._port_get(port_id)
+
+    def port_delete(self, port_id):
+        """
+        Deletes a port identified by port_id
+
+        Arguments:
+           port_id (string) : UUID of the port
+
+        Returns: None
+        """
+        assert port_id == self._port_get(port_id)['id']
+        ntconn = self._get_neutron_connection()
+        try:
+            ntconn.delete_port(port_id)
+        except Exception as e:
+            logger.error("Port Delete operation failed for port_id : %s. Exception: %s" %(port_id, str(e)))
+            raise
+
+class NeutronDriverV2(NeutronDriver):
+    """
+    Driver for openstack neutron-client v2
+    """
+    def __init__(self, ks_drv):
+        """
+        Constructor for NeutronDriver
+        Arguments: KeystoneDriver class object
+        """
+        super(NeutronDriverV2, self).__init__(ks_drv, 'network', '2.0')
+
+class CeilometerDriver(object):
+    """
+    Driver for openstack ceilometer_client
+    """
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for CeilometerDriver
+        Arguments: KeystoneDriver class object
+        """
+        self.ks_drv = ks_drv
+        self._service_name = service_name
+        self._version = version
+
+    def _get_ceilometer_credentials(self):
+        """
+        Returns a dictionary of kwargs required to instantiate python-ceilometerclient class
+        """
+        creds = {}
+        creds['version']     = self._version
+        creds['endpoint']    = self.ks_drv.get_service_endpoint(self._service_name, "publicURL")
+        creds['token']  = self.ks_drv.get_auth_token()
+        return creds
+
+    def _get_ceilometer_connection(self):
+        """
+        Returns an object of class python-ceilometerclient
+        """
+        if not hasattr(self, '_ceilometer_connection'):
+            self._ceilometer_connection = ceilo_client.Client(**self._get_ceilometer_credentials())
+        else:
+            # Reinitialize if auth_token is no longer valid
+            if not self.ks_drv.is_auth_token_valid():
+                self._ceilometer_connection = ceilo_client.Client(**self._get_ceilometer_credentials())
+        return self._ceilometer_connection
+
+    def get_ceilo_endpoint(self):
+        """
+        Returns the service endpoint for a ceilometer connection
+        """
+        try:
+            ceilocreds = self._get_ceilometer_credentials()
+        except KeystoneExceptions.EndpointNotFound as e:
+            return None
+
+        return ceilocreds['endpoint']
+
+    def meter_list(self):
+        """
+        Returns a list of meters.
+
+        Returns:
+           A list of meters
+        """
+        ceiloconn = self._get_ceilometer_connection()
+
+        try:
+            meters  = ceiloconn.meters.list()
+        except Exception as e:
+            logger.info("OpenstackDriver: List meters operation failed. Exception: %s" %(str(e)))
+            raise
+        return meters
+
+    def get_usage(self, vm_instance_id, meter_name, period):
+        ceiloconn = self._get_ceilometer_connection()
+        try:
+            query = [dict(field='resource_id', op='eq', value=vm_instance_id)]
+            stats = ceiloconn.statistics.list(meter_name, q=query, period=period)
+            usage = 0
+            if stats:
+                stat = stats[-1]
+                usage = stat.avg
+        except Exception as e:
+            logger.info("OpenstackDriver: Get %s[%s] operation failed. Exception: %s" %(meter_name, vm_instance_id, str(e)))
+            raise
+
+        return usage
+
+    def get_samples(self, vim_instance_id, counter_name, limit=1):
+        try:
+            ceiloconn = self._get_ceilometer_connection()
+            filter = json.dumps({
+                "and": [
+                    {"=": {"resource": vim_instance_id}},
+                    {"=": {"counter_name": counter_name}}
+                    ]
+                })
+            result = ceiloconn.query_samples.query(filter=filter, limit=limit)
+            return result[-limit:]
+
+        except Exception as e:
+            logger.exception(e)
+
+        return []
+
+
+class CeilometerDriverV2(CeilometerDriver):
+    """
+    Driver for openstack ceilometer-client V2
+    """
+    def __init__(self, ks_drv):
+        """
+        Constructor for CeilometerDriver
+        Arguments: KeystoneDriver class object
+        """
+        super(CeilometerDriverV2, self).__init__(ks_drv, 'metering', '2')
+
+class OpenstackDriver(object):
+    """
+    Driver for openstack keystone, nova, neutron, glance and ceilometer services
+    """
+    def __init__(self,username, password, auth_url, tenant_name, mgmt_network = None):
+
+        if auth_url.find('/v3') != -1:
+            self.ks_drv        = KeystoneDriverV3(username, password, auth_url, tenant_name)
+            self.glance_drv    = GlanceDriverV2(self.ks_drv)
+            self.nova_drv      = NovaDriverV21(self.ks_drv)
+            self.neutron_drv   = NeutronDriverV2(self.ks_drv)
+            self.ceilo_drv     = CeilometerDriverV2(self.ks_drv)
+        elif auth_url.find('/v2') != -1:
+            self.ks_drv        = KeystoneDriverV2(username, password, auth_url, tenant_name)
+            self.glance_drv    = GlanceDriverV2(self.ks_drv)
+            self.nova_drv      = NovaDriverV2(self.ks_drv)
+            self.neutron_drv   = NeutronDriverV2(self.ks_drv)
+            self.ceilo_drv     = CeilometerDriverV2(self.ks_drv)
+        else:
+            raise NotImplementedError("Auth URL is wrong or invalid. Only Keystone v2 & v3 supported")
+
+        if mgmt_network is not None:
+            self._mgmt_network = mgmt_network
+
+            networks = []
+            try:
+                ntconn   = self.neutron_drv._get_neutron_connection()
+                networks = ntconn.list_networks()
+            except Exception as e:
+                logger.error("OpenstackDriver: List Network operation failed. Exception: %s" %(str(e)))
+                raise
+
+            network_list = [ network for network in networks['networks'] if network['name'] == mgmt_network ]
+
+            if not network_list:
+                raise NeutronException.NotFound("Could not find network %s" %(mgmt_network))
+            self._mgmt_network_id = network_list[0]['id']
+
+    def validate_account_creds(self):
+        try:
+            ksconn = self.ks_drv._get_keystone_connection()
+        except KeystoneExceptions.AuthorizationFailure as e:
+            logger.error("OpenstackDriver: Unable to authenticate or validate the existing credentials. Exception: %s" %(str(e)))
+            raise ValidationError("Invalid Credentials: "+ str(e))
+        except Exception as e:
+            logger.error("OpenstackDriver: Could not connect to Openstack. Exception: %s" %(str(e)))
+            raise ValidationError("Connection Error: "+ str(e))
+
+    def get_mgmt_network_id(self):
+        return self._mgmt_network_id
+
+    def glance_image_create(self, **kwargs):
+        if not 'disk_format' in kwargs:
+            kwargs['disk_format'] = 'qcow2'
+        if not 'container_format' in kwargs:
+            kwargs['container_format'] = 'bare'
+        if not 'min_disk' in kwargs:
+            kwargs['min_disk'] = 0
+        if not 'min_ram' in kwargs:
+            kwargs['min_ram'] = 0
+        return self.glance_drv.image_create(**kwargs)
+
+    def glance_image_upload(self, image_id, fd):
+        self.glance_drv.image_upload(image_id, fd)
+
+    def glance_image_add_location(self, image_id, location, metadata=None):
+        self.glance_drv.image_add_location(image_id, location, metadata if metadata is not None else {})
+
+    def glance_image_delete(self, image_id):
+        self.glance_drv.image_delete(image_id)
+
+    def glance_image_list(self):
+        return self.glance_drv.image_list()
+
+    def glance_image_get(self, image_id):
+        return self.glance_drv.image_get(image_id)
+
+
+    def nova_flavor_list(self):
+        return self.nova_drv.flavor_list()
+
+    def nova_flavor_create(self, name, ram, vcpus, disk, epa_specs):
+        extra_specs = epa_specs if epa_specs else {}
+        return self.nova_drv.flavor_create(name,
+                                           ram         = ram,
+                                           vcpu        = vcpus,
+                                           disk        = disk,
+                                           extra_specs = extra_specs)
+
+    def nova_flavor_delete(self, flavor_id):
+        self.nova_drv.flavor_delete(flavor_id)
+
+    def nova_flavor_get(self, flavor_id):
+        return self.nova_drv.flavor_get(flavor_id)
+
+    def nova_server_create(self, **kwargs):
+        assert kwargs['flavor_id'] == self.nova_drv.flavor_get(kwargs['flavor_id'])['id']
+        image = self.glance_drv.image_get(kwargs['image_id'])
+        if image['status'] != 'active':
+            raise GlanceException.NotFound("Image with image_id: %s not found in active state. Current State: %s" %(image['id'], image['status']))
+        # if 'network_list' in kwargs:
+        #     kwargs['network_list'].append(self._mgmt_network_id)
+        # else:
+        #     kwargs['network_list'] = [self._mgmt_network_id]
+
+        if 'security_groups' not in kwargs:
+            nvconn = self.nova_drv._get_nova_connection()
+            sec_groups = nvconn.security_groups.list()
+            if sec_groups:
+                ## Should we add the VM to all available security_groups ???
+                kwargs['security_groups'] = [x.name for x in sec_groups]
+            else:
+                kwargs['security_groups'] = None
+
+        return self.nova_drv.server_create(**kwargs)
+
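+    # Illustrative end-to-end VM creation through the top-level driver
+    # (credentials, names and UUIDs are placeholders). Security groups are
+    # filled in automatically when not supplied.
+    #
+    #   drv = OpenstackDriver('user', 'pass', 'http://host:5000/v2.0',
+    #                         'tenant', mgmt_network='mgmt')
+    #   flavor_id = drv.nova_flavor_create('rift.small', 4096, 2, 20, None)
+    #   server_id = drv.nova_server_create(name='test-vm',
+    #                                      image_id='<active-image-uuid>',
+    #                                      flavor_id=flavor_id,
+    #                                      metadata={},
+    #                                      userdata=None,
+    #                                      network_list=[drv.get_mgmt_network_id()])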
+    def nova_server_add_port(self, server_id, port_id):
+        self.nova_drv.server_add_port(server_id, port_id)
+
+    def nova_server_delete_port(self, server_id, port_id):
+        self.nova_drv.server_delete_port(server_id, port_id)
+
+    def nova_server_start(self, server_id):
+        self.nova_drv.server_start(server_id)
+
+    def nova_server_stop(self, server_id):
+        self.nova_drv.server_stop(server_id)
+
+    def nova_server_delete(self, server_id):
+        self.nova_drv.server_delete(server_id)
+
+    def nova_server_reboot(self, server_id):
+        self.nova_drv.server_reboot(server_id, reboot_type='HARD')
+
+    def nova_server_rebuild(self, server_id, image_id):
+        self.nova_drv.server_rebuild(server_id, image_id)
+
+    def nova_floating_ip_list(self):
+        return self.nova_drv.floating_ip_list()
+
+    def nova_floating_ip_create(self, pool = None):
+        return self.nova_drv.floating_ip_create(pool)
+
+    def nova_floating_ip_delete(self, floating_ip):
+        self.nova_drv.floating_ip_delete(floating_ip)
+
+    def nova_floating_ip_assign(self, server_id, floating_ip, fixed_ip):
+        self.nova_drv.floating_ip_assign(server_id, floating_ip, fixed_ip)
+
+    def nova_floating_ip_release(self, server_id, floating_ip):
+        self.nova_drv.floating_ip_release(server_id, floating_ip)
+
+    def nova_server_list(self):
+        return self.nova_drv.server_list()
+
+    def nova_server_get(self, server_id):
+        return self.nova_drv.server_get(server_id)
+
+    def neutron_network_list(self):
+        return self.neutron_drv.network_list()
+
+    def neutron_network_get(self, network_id):
+        return self.neutron_drv.network_get(network_id)
+
+    def neutron_network_create(self, **kwargs):
+        return self.neutron_drv.network_create(**kwargs)
+
+    def neutron_network_delete(self, network_id):
+        self.neutron_drv.network_delete(network_id)
+
+    def neutron_subnet_list(self):
+        return self.neutron_drv.subnet_list()
+
+    def neutron_subnet_get(self, subnet_id):
+        return self.neutron_drv.subnet_get(subnet_id)
+
+    def neutron_subnet_create(self, network_id, cidr):
+        return self.neutron_drv.subnet_create(network_id, cidr)
+
+    def neutron_subnet_delete(self, subnet_id):
+        self.neutron_drv.subnet_delete(subnet_id)
+
+    def neutron_port_list(self, **kwargs):
+        return self.neutron_drv.port_list(**kwargs)
+
+    def neutron_port_get(self, port_id):
+        return self.neutron_drv.port_get(port_id)
+
+    def neutron_port_create(self, **kwargs):
+        subnets = [subnet for subnet in self.neutron_drv.subnet_list() if subnet['network_id'] == kwargs['network_id']]
+        assert len(subnets) == 1
+        kwargs['subnet_id'] = subnets[0]['id']
+        if not 'admin_state_up' in kwargs:
+            kwargs['admin_state_up'] = True
+        port_id =  self.neutron_drv.port_create(**kwargs)
+
+        if 'vm_id' in kwargs:
+            self.nova_server_add_port(kwargs['vm_id'], port_id)
+        return port_id
+
+    def neutron_port_delete(self, port_id):
+        self.neutron_drv.port_delete(port_id)
+
+    def ceilo_meter_endpoint(self):
+        return self.ceilo_drv.get_ceilo_endpoint()
+
+    def ceilo_meter_list(self):
+        return self.ceilo_drv.meter_list()
+
+    def ceilo_nfvi_metrics(self, vmid):
+        metrics = dict()
+
+        # The connection is created once and reused for each of the samples
+        # acquired.
+        conn = self.ceilo_drv._get_ceilometer_connection()
+
+        def get_samples(vim_instance_id, counter_name, limit=1):
+            try:
+                filter = json.dumps({
+                    "and": [
+                        {"=": {"resource": vim_instance_id}},
+                        {"=": {"counter_name": counter_name}}
+                        ]
+                    })
+                result = conn.query_samples.query(filter=filter, limit=limit)
+                return result[-limit:]
+
+            except Exception as e:
+                logger.exception(e)
+
+            return []
+
+        cpu_util = get_samples(
+                    vim_instance_id=vmid,
+                    counter_name="cpu_util",
+                    )
+
+        memory_usage = get_samples(
+                    vim_instance_id=vmid,
+                    counter_name="memory.usage",
+                    )
+
+        disk_usage = get_samples(
+                    vim_instance_id=vmid,
+                    counter_name="disk.usage",
+                    )
+
+        if cpu_util:
+            metrics["cpu_util"] = cpu_util[-1].volume
+
+        if memory_usage:
+            metrics["memory_usage"] = 1e6 * memory_usage[-1].volume
+
+        if disk_usage:
+            metrics["disk_usage"] = disk_usage[-1].volume
+
+        return metrics
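+    # Illustrative use of the NFVI metrics helper (placeholder UUID). Depending
+    # on which samples Ceilometer has collected for the VM, the returned dict
+    # may contain 'cpu_util', 'memory_usage' (scaled by 1e6 from the reported
+    # value) and 'disk_usage'.
+    #
+    #   metrics = drv.ceilo_nfvi_metrics('<vm-uuid>')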
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py
new file mode 100644 (file)
index 0000000..0aeb0b0
--- /dev/null
@@ -0,0 +1,213 @@
+#!/usr/bin/env python3
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import rift.rwcal.openstack as openstack_drv
+import logging
+import argparse
+import sys, os, time
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger('rift.cal.openstack.prepare_vm')
+
+
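+# Illustrative invocation (all values are placeholders). The script forks into
+# the background, waits for the server to become ACTIVE and then assigns the
+# floating IP on the management network:
+#
+#   python3 prepare_vm.py --auth_url http://host:5000/v2.0 \
+#       --username admin --password secret --tenant_name demo \
+#       --mgmt_network mgmt --server_id <server-uuid> --floating_ip 10.0.0.5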
+def assign_floating_ip_address(drv, argument):
+    if not argument.floating_ip:
+        return
+    
+    logger.info("Assigning the floating_ip: %s" %(argument.floating_ip))
+    
+    for i in range(120):
+        server = drv.nova_server_get(argument.server_id)
+        for network_name,network_info in server['addresses'].items():
+            if network_info:
+                if network_name == argument.mgmt_network:
+                    for n_info in network_info:
+                        if 'OS-EXT-IPS:type' in n_info and n_info['OS-EXT-IPS:type'] == 'fixed':
+                            management_ip = n_info['addr']
+                            drv.nova_floating_ip_assign(argument.server_id,
+                                                        argument.floating_ip,
+                                                        management_ip)
+                            logger.info("Assigned floating_ip: %s to management_ip: %s" %(argument.floating_ip, management_ip))
+                        return
+        logger.info("Waiting for management_ip to be assigned to server: %s" %(server))
+        time.sleep(1)
+    else:
+        logger.info("No management_ip IP available to associate floating_ip for server: %s" %(server))
+    return
+
+
+def create_port_metadata(drv, argument):
+    if argument.port_metadata == False:
+        return
+
+    ### Get Management Network ID
+    network_list = drv.neutron_network_list()
+    mgmt_network_id = [net['id'] for net in network_list if net['name'] == argument.mgmt_network][0]
+    port_list = [ port for port in drv.neutron_port_list(**{'device_id': argument.server_id})
+                  if port['network_id'] != mgmt_network_id ]
+    meta_data = {}
+
+    meta_data['rift-meta-ports'] = str(len(port_list))
+    port_id = 0
+    for port in port_list:
+        info = []
+        info.append('"port_name":"'+port['name']+'"')
+        if 'mac_address' in port:
+            info.append('"hw_addr":"'+port['mac_address']+'"')
+        if 'network_id' in port:
+            #info.append('"network_id":"'+port['network_id']+'"')
+            net_name = [net['name'] for net in network_list if net['id'] == port['network_id']]
+            if net_name:
+                info.append('"network_name":"'+net_name[0]+'"')
+        if 'fixed_ips' in port:
+            ip_address = port['fixed_ips'][0]['ip_address']
+            info.append('"ip":"'+ip_address+'"')
+            
+        meta_data['rift-meta-port-'+str(port_id)] = '{' + ','.join(info) + '}'
+        port_id += 1
+        
+    nvconn = drv.nova_drv._get_nova_connection()
+    nvconn.servers.set_meta(argument.server_id, meta_data)
+    
+        
+def prepare_vm_after_boot(drv,argument):
+    ### Important to call create_port_metadata before assign_floating_ip_address
+    ### since assign_floating_ip_address can wait thus delaying port_metadata creation
+
+    ### Wait up to 2 minutes for the server to come up -- needs fine tuning
+    wait_time = 120 
+    sleep_time = 0.2
+    for i in range(int(wait_time/sleep_time)):
+        server = drv.nova_server_get(argument.server_id)
+        if server['status'] == 'ACTIVE':
+            break
+        elif server['status'] == 'BUILD':
+            logger.info("Waiting for server to build")
+            time.sleep(sleep_time)
+        else:
+            logger.info("Server reached state: %s" %(server['status']))
+            sys.exit(3)
+    else:
+        logger.info("Server did not reach active state in %d seconds. Current state: %s" %(wait_time, server['status']))
+        sys.exit(4)
+    
+    #create_port_metadata(drv, argument)
+    assign_floating_ip_address(drv, argument)
+    
+
+def main():
+    """
+    Main routine
+    """
+    parser = argparse.ArgumentParser(description='Script to create openstack resources')
+    parser.add_argument('--auth_url',
+                        action = "store",
+                        dest = "auth_url",
+                        type = str,
+                        help='Keystone Auth URL')
+
+    parser.add_argument('--username',
+                        action = "store",
+                        dest = "username",
+                        type = str,
+                        help = "Username for openstack installation")
+
+    parser.add_argument('--password',
+                        action = "store",
+                        dest = "password",
+                        type = str,
+                        help = "Password for openstack installation")
+
+    parser.add_argument('--tenant_name',
+                        action = "store",
+                        dest = "tenant_name",
+                        type = str,
+                        help = "Tenant name openstack installation")
+
+    parser.add_argument('--mgmt_network',
+                        action = "store",
+                        dest = "mgmt_network",
+                        type = str,
+                        help = "mgmt_network")
+    
+    parser.add_argument('--server_id',
+                        action = "store",
+                        dest = "server_id",
+                        type = str,
+                        help = "Server ID on which boot operations needs to be performed")
+    
+    parser.add_argument('--floating_ip',
+                        action = "store",
+                        dest = "floating_ip",
+                        type = str,
+                        help = "Floating IP to be assigned")
+
+    parser.add_argument('--port_metadata',
+                        action = "store_true",
+                        dest = "port_metadata",
+                        default = False,
+                        help = "Create Port Metadata")
+
+    argument = parser.parse_args()
+
+    if not argument.auth_url:
+        logger.error("ERROR: AuthURL is not configured")
+        sys.exit(1)
+    else:
+        logger.info("Using AuthURL: %s" %(argument.auth_url))
+
+    if not argument.username:
+        logger.error("ERROR: Username is not configured")
+        sys.exit(1)
+    else:
+        logger.info("Using Username: %s" %(argument.username))
+
+    if not argument.password:
+        logger.error("ERROR: Password is not configured")
+        sys.exit(1)
+    else:
+        logger.info("Using Password: %s" %(argument.password))
+
+    if not argument.tenant_name:
+        logger.error("ERROR: Tenant Name is not configured")
+        sys.exit(1)
+    else:
+        logger.info("Using Tenant Name: %s" %(argument.tenant_name))
+
+    if not argument.mgmt_network:
+        logger.error("ERROR: Management Network Name is not configured")
+        sys.exit(1)
+    else:
+        logger.info("Using Management Network: %s" %(argument.mgmt_network))
+        
+    if not argument.server_id:
+        logger.error("ERROR: Server ID is not configured")
+        sys.exit(1)
+    else:
+        logger.info("Using Server ID : %s" %(argument.server_id))
+        
+        
+    try:
+        pid = os.fork()
+        if pid > 0:
+            # exit for parent
+            sys.exit(0)
+    except OSError as e:
+        logger.error("fork failed: %d (%s)\n" % (e.errno, e.strerror))
+        sys.exit(2)
+        
+    drv = openstack_drv.OpenstackDriver(username = argument.username,
+                                        password = argument.password,
+                                        auth_url = argument.auth_url,
+                                        tenant_name = argument.tenant_name,
+                                        mgmt_network = argument.mgmt_network)
+    prepare_vm_after_boot(drv, argument)
+    sys.exit(0)
+    
+if __name__ == "__main__":
+    main()
+        
+
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py
new file mode 100644 (file)
index 0000000..869b439
--- /dev/null
@@ -0,0 +1,1416 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import time
+import threading
+import logging
+import rift.rwcal.openstack as openstack_drv
+import rw_status
+import rwlogger
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+import os, subprocess
+
+PREPARE_VM_CMD = "prepare_vm.py --auth_url {auth_url} --username {username} --password {password} --tenant_name {tenant_name} --mgmt_network {mgmt_network} --server_id {server_id} --port_metadata"
+
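+# Illustrative expansion of PREPARE_VM_CMD (placeholder values); the resulting
+# command line is presumably launched with the subprocess module imported above.
+#
+#   cmd = PREPARE_VM_CMD.format(auth_url='http://host:5000/v2.0',
+#                               username='admin',
+#                               password='secret',
+#                               tenant_name='demo',
+#                               mgmt_network='mgmt',
+#                               server_id='<server-uuid>')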
+logger = logging.getLogger('rwcal.openstack')
+logger.setLevel(logging.DEBUG)
+
+rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND,
+                                             KeyError: RwTypes.RwStatus.NOTFOUND,
+                                             NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,})
+
+
+class RwcalOpenstackPlugin(GObject.Object, RwCal.Cloud):
+    """This class implements the CAL VALA methods for openstack."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._driver_class = openstack_drv.OpenstackDriver
+
+
+    def _get_driver(self, account):
+        try:
+            drv = self._driver_class(username     = account.openstack.key,
+                                     password     = account.openstack.secret,
+                                     auth_url     = account.openstack.auth_url,
+                                     tenant_name  = account.openstack.tenant,
+                                     mgmt_network = account.openstack.mgmt_network)
+        except Exception as e:
+            logger.error("RwcalOpenstackPlugin: OpenstackDriver init failed. Exception: %s" %(str(e)))
+            raise
+
+        return drv
+
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(rwlogger.RwLogger(category="rwcal-openstack",
+                                                log_hdl=rwlog_ctx,))
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        Performs an access to the resources using the Keystone API. If the
+        credentials are not valid, an error code and reason string are returned.
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus()
+
+        try:
+            drv = self._get_driver(account)
+        except Exception as e:
+            msg = "RwcalOpenstackPlugin: OpenstackDriver connection failed. Exception: %s" %(str(e))
+            logger.error(msg)
+            status.status = "failure"
+            status.details = msg
+            return status
+
+        try:
+            drv.validate_account_creds()
+        except openstack_drv.ValidationError as e:
+            logger.error("RwcalOpenstackPlugin: OpenstackDriver credential validation failed. Exception: %s", str(e))
+            status.status = "failure"
+            status.details = "Invalid Credentials: %s" % str(e)
+            return status
+
+        status.status = "success"
+        status.details = "Connection was successful"
+        return status
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_management_network(self, account):
+        """
+        Returns the management network associated with the specified account.
+        Arguments:
+            account - a cloud account
+
+        Returns: 
+            The management network
+        """
+        return account.openstack.mgmt_network
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_tenant(self, account, name):
+        """Create a new tenant.
+
+        Arguments:
+            account - a cloud account
+            name - name of the tenant
+
+        Returns:
+            The tenant id
+        """
+        raise NotImplementedError
+    
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """delete a tenant.
+
+        Arguments:
+            account - a cloud account
+            tenant_id - id of the tenant
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """List tenants.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of tenants
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_role(self, account, name):
+        """Create a new user.
+
+        Arguments:
+            account - a cloud account
+            name - name of the user
+
+        Returns:
+            The user id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """Delete a user.
+
+        Arguments:
+            account - a cloud account
+            role_id - id of the user
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """List roles.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of roles
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_image(self, account, image):
+        """Create an image
+
+        Arguments:
+            account - a cloud account
+            image - a description of the image to create
+
+        Returns:
+            The image id
+        """
+        try:
+            fd = open(image.location, "rb")
+        except Exception as e:
+            logger.error("Could not open file: %s for upload. Exception received: %s", image.location, str(e))
+            raise
+
+        kwargs = {}
+        kwargs['name'] = image.name
+        
+        if image.disk_format:
+            kwargs['disk_format'] = image.disk_format
+        if image.container_format:
+            kwargs['container_format'] = image.container_format
+
+        drv = self._get_driver(account)
+        # Create Image
+        image_id = drv.glance_image_create(**kwargs)
+        # Upload the Image
+        drv.glance_image_upload(image_id, fd)
+
+        return image_id
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """Delete a vm image.
+
+        Arguments:
+            account - a cloud account
+            image_id - id of the image to delete
+        """
+        self._get_driver(account).glance_image_delete(image_id = image_id)
+
+
+    @staticmethod
+    def _fill_image_info(img_info):
+        """Create a GI object from image info dictionary
+
+        Converts image information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            img_info - image information dictionary object from openstack
+
+        Returns:
+            The ImageInfoItem
+        """
+        img = RwcalYang.ImageInfoItem()
+        img.name = img_info['name']
+        img.id = img_info['id']
+        img.checksum = img_info['checksum']
+        img.disk_format = img_info['disk_format']
+        img.container_format = img_info['container_format']
+        if img_info['status'] == 'active':
+            img.state = 'active'
+        else:
+            img.state = 'inactive'
+        return img
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """Return a list of the names of all available images.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            The list of images in a VimResources object
+        """
+        response = RwcalYang.VimResources()
+        images = self._get_driver(account).glance_image_list()
+        for img in images:
+            response.imageinfo_list.append(RwcalOpenstackPlugin._fill_image_info(img))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        """Return a image information.
+
+        Arguments:
+            account - a cloud account
+            image_id - an id of the image
+
+        Returns:
+            ImageInfoItem object containing image information.
+        """
+        image = self._get_driver(account).glance_image_get(image_id)
+        return RwcalOpenstackPlugin._fill_image_info(image)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vm(self, account, vminfo):
+        """Create a new virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vminfo - information that defines the type of VM to create
+
+        Returns:
+            The VM id
+        """
+        kwargs = {}
+        kwargs['name']      = vminfo.vm_name
+        kwargs['flavor_id'] = vminfo.flavor_id
+        kwargs['image_id']  = vminfo.image_id
+        
+        if vminfo.has_field('cloud_init') and vminfo.cloud_init.has_field('userdata'):
+            kwargs['userdata']  = vminfo.cloud_init.userdata
+        else:
+            kwargs['userdata'] = ''
+            
+        if account.openstack.security_groups:
+            kwargs['security_groups'] = account.openstack.security_groups
+        
+        port_list = []
+        for port in vminfo.port_list:
+            port_list.append(port.port_id)
+
+        if port_list:
+            kwargs['port_list'] = port_list    
+
+        network_list = []
+        for network in vminfo.network_list:
+            network_list.append(network.network_id)
+
+        if network_list:
+            kwargs['network_list'] = network_list
+            
+        metadata = {}
+        for field in vminfo.user_tags.fields:
+            if vminfo.user_tags.has_field(field):
+                metadata[field] = getattr(vminfo.user_tags, field)
+        kwargs['metadata']  = metadata 
+
+        return self._get_driver(account).nova_server_create(**kwargs)
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """Start an existing virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        self._get_driver(account).nova_server_start(vm_id)
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """Stop a running virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        self._get_driver(account).nova_server_stop(vm_id)
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """Delete a virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        self._get_driver(account).nova_server_delete(vm_id)
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """Reboot a virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        self._get_driver(account).nova_server_reboot(vm_id)
+
+    @staticmethod
+    def _fill_vm_info(vm_info, mgmt_network):
+        """Create a GI object from vm info dictionary
+
+        Converts VM information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            vm_info - VM information from openstack
+            mgmt_network - Management network
+
+        Returns:
+            Protobuf Gi object for VM
+        """
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_id     = vm_info['id']
+        vm.vm_name   = vm_info['name']
+        vm.image_id  = vm_info['image']['id']
+        vm.flavor_id = vm_info['flavor']['id']
+        vm.state     = vm_info['status']
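+        # For the management network, prefer a floating address as the
+        # public_ip and fall back to the first listed address; addresses on
+        # all other networks are recorded as private IPs.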
+        for network_name, network_info in vm_info['addresses'].items():
+            if network_info:
+                if network_name == mgmt_network:
+                    vm.public_ip = next((item['addr']
+                                            for item in network_info
+                                            if item['OS-EXT-IPS:type'] == 'floating'),
+                                        network_info[0]['addr'])
+                    vm.management_ip = network_info[0]['addr']
+                else:
+                    for interface in network_info:
+                        addr = vm.private_ip_list.add()
+                        addr.ip_address = interface['addr']
+        # Look for any metadata
+        for key, value in vm_info['metadata'].items():
+            if key in vm.user_tags.fields:
+                setattr(vm.user_tags, key, value)
+        if vm_info.get('OS-EXT-SRV-ATTR:host') is not None:
+            vm.host_name = vm_info['OS-EXT-SRV-ATTR:host']
+        if vm_info.get('OS-EXT-AZ:availability_zone') is not None:
+            vm.availability_zone = vm_info['OS-EXT-AZ:availability_zone']
+        return vm
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        """Return a list of the VMs as vala boxed objects
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List containing VM information
+        """
+        response = RwcalYang.VimResources()
+        vms = self._get_driver(account).nova_server_list()
+        for vm in vms:
+            response.vminfo_list.append(RwcalOpenstackPlugin._fill_vm_info(vm, account.openstack.mgmt_network))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vm(self, account, id):
+        """Return vm information.
+
+        Arguments:
+            account - a cloud account
+            id - an id for the VM
+
+        Returns:
+            VM information
+        """
+        vm = self._get_driver(account).nova_server_get(id)
+        return RwcalOpenstackPlugin._fill_vm_info(vm, account.openstack.mgmt_network)
+
+    @staticmethod
+    def _get_guest_epa_specs(guest_epa):
+        """
+        Returns EPA Specs dictionary for guest_epa attributes
+        """
+        epa_specs = {}
+        if guest_epa.has_field('mempage_size'):
+            if guest_epa.mempage_size == 'LARGE':
+                epa_specs['hw:mem_page_size'] = 'large'
+            elif guest_epa.mempage_size == 'SMALL':
+                epa_specs['hw:mem_page_size'] = 'small'
+            elif guest_epa.mempage_size == 'SIZE_2MB':
+                epa_specs['hw:mem_page_size'] = 2048
+            elif guest_epa.mempage_size == 'SIZE_1GB':
+                epa_specs['hw:mem_page_size'] = 1048576
+            elif guest_epa.mempage_size == 'PREFER_LARGE':
+                epa_specs['hw:mem_page_size'] = 'large'
+            else:
+                assert False, "Unsupported value for mempage_size"
+        
+        if guest_epa.has_field('cpu_pinning_policy'):
+            if guest_epa.cpu_pinning_policy == 'DEDICATED':
+                epa_specs['hw:cpu_policy'] = 'dedicated'
+            elif guest_epa.cpu_pinning_policy == 'SHARED':
+                epa_specs['hw:cpu_policy'] = 'shared'
+            elif guest_epa.cpu_pinning_policy == 'ANY':
+                pass
+            else:
+                assert False, "Unsupported value for cpu_pinning_policy"
+                
+        if guest_epa.has_field('cpu_thread_pinning_policy'):
+            if guest_epa.cpu_thread_pinning_policy == 'AVOID':
+                epa_specs['hw:cpu_threads_policy'] = 'avoid'
+            elif guest_epa.cpu_thread_pinning_policy == 'SEPARATE':
+                epa_specs['hw:cpu_threads_policy'] = 'separate'
+            elif guest_epa.cpu_thread_pinning_policy == 'ISOLATE':
+                epa_specs['hw:cpu_threads_policy'] = 'isolate'
+            elif guest_epa.cpu_thread_pinning_policy == 'PREFER':
+                epa_specs['hw:cpu_threads_policy'] = 'prefer'
+            else:
+                assert False, "Unsupported value for cpu_thread_pinning_policy"
+                    
+        if guest_epa.has_field('trusted_execution'):
+            if guest_epa.trusted_execution == True:
+                epa_specs['trust:trusted_host'] = 'trusted'
+                
+        if guest_epa.has_field('numa_node_policy'):
+            if guest_epa.numa_node_policy.has_field('node_cnt'):
+                epa_specs['hw:numa_nodes'] = guest_epa.numa_node_policy.node_cnt
+
+            if guest_epa.numa_node_policy.has_field('mem_policy'):
+                if guest_epa.numa_node_policy.mem_policy == 'STRICT':
+                    epa_specs['hw:numa_mempolicy'] = 'strict'
+                elif guest_epa.numa_node_policy.mem_policy == 'PREFERRED':
+                    epa_specs['hw:numa_mempolicy'] = 'preferred'
+                else:
+                    assert False, "Unsupported value for num_node_policy.mem_policy"
+
+            if guest_epa.numa_node_policy.has_field('node'):
+                for node in guest_epa.numa_node_policy.node:
+                    if node.has_field('vcpu') and node.vcpu:
+                        epa_specs['hw:numa_cpus.'+str(node.id)] = ','.join([str(j) for j in node.vcpu])
+                    if node.memory_mb:
+                        epa_specs['hw:numa_mem.'+str(node.id)] = str(node.memory_mb)
+
+        if guest_epa.has_field('pcie_device'):
+            pci_devices = []
+            for device in guest_epa.pcie_device:
+                pci_devices.append(device.device_id +':'+str(device.count))
+            epa_specs['pci_passthrough:alias'] = ','.join(pci_devices)
+
+        return epa_specs
+
+    @staticmethod
+    def _get_host_epa_specs(host_epa):
+        """
+        Returns EPA Specs dictionary for host_epa attributes
+        """
+        host_epa = {}
+        return host_epa
+
+    @staticmethod
+    def _get_hypervisor_epa_specs(hypervisor_epa):
+        """
+        Returns EPA Specs dictionary for hypervisor_epa attributes
+        """
+        hypervisor_epa = {}
+        return hypervisor_epa
+    
+    @staticmethod
+    def _get_vswitch_epa_specs(vswitch_epa):
+        """
+        Returns EPA Specs dictionary for vswitch_epa attributes
+        """
+        vswitch_epa = {}
+        return vswitch_epa
+    
+    @staticmethod
+    def _get_epa_specs(flavor):
+        """
+        Returns epa_specs dictionary based on flavor information
+        """
+        epa_specs = {}
+        if flavor.guest_epa:
+            guest_epa = RwcalOpenstackPlugin._get_guest_epa_specs(flavor.guest_epa)
+            epa_specs.update(guest_epa)
+        if flavor.host_epa:
+            host_epa = RwcalOpenstackPlugin._get_host_epa_specs(flavor.host_epa)
+            epa_specs.update(host_epa)
+        if flavor.hypervisor_epa:
+            hypervisor_epa = RwcalOpenstackPlugin._get_hypervisor_epa_specs(flavor.hypervisor_epa)
+            epa_specs.update(hypervisor_epa)
+        if flavor.vswitch_epa:
+            vswitch_epa = RwcalOpenstackPlugin._get_vswitch_epa_specs(flavor.vswitch_epa)
+            epa_specs.update(vswitch_epa)
+            
+        return epa_specs
+    
+    @rwstatus(ret_on_failure=[""])
+    def do_create_flavor(self, account, flavor):
+        """Create new flavor.
+
+        Arguments:
+            account - a cloud account
+            flavor - flavor of the VM
+
+        Returns:
+            flavor id
+        """
+        epa_specs = RwcalOpenstackPlugin._get_epa_specs(flavor)
+        return self._get_driver(account).nova_flavor_create(name      = flavor.name,
+                                                            ram       = flavor.vm_flavor.memory_mb,
+                                                            vcpus     = flavor.vm_flavor.vcpu_count,
+                                                            disk      = flavor.vm_flavor.storage_gb,
+                                                            epa_specs = epa_specs)
+
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """Delete flavor.
+
+        Arguments:
+            account - a cloud account
+            flavor_id - id flavor of the VM
+        """
+        self._get_driver(account).nova_flavor_delete(flavor_id)
+
+    @staticmethod
+    def _fill_epa_attributes(flavor, flavor_info):
+        """Helper function to populate the EPA attributes 
+
+        Arguments:
+              flavor     : Object with EPA attributes
+              flavor_info: A dictionary of flavor_info received from openstack
+        Returns:
+              None
+        """
+        getattr(flavor, 'vm_flavor').vcpu_count  = flavor_info['vcpus']
+        getattr(flavor, 'vm_flavor').memory_mb   = flavor_info['ram']
+        getattr(flavor, 'vm_flavor').storage_gb  = flavor_info['disk']
+
+        ### If there are no extra_specs, there is nothing further to decode
+        if 'extra_specs' not in flavor_info:
+            return
+
+        if 'hw:cpu_policy' in flavor_info['extra_specs']:
+            getattr(flavor, 'guest_epa').cpu_pinning_policy = flavor_info['extra_specs']['hw:cpu_policy'].upper()
+
+        if 'hw:cpu_threads_policy' in flavor_info['extra_specs']:
+            getattr(flavor, 'guest_epa').cpu_thread_pinning_policy = flavor_info['extra_specs']['hw:cpu_threads_policy'].upper()
+            
+        if 'hw:mem_page_size' in flavor_info['extra_specs']:
+            if flavor_info['extra_specs']['hw:mem_page_size'] == 'large':
+                getattr(flavor, 'guest_epa').mempage_size = 'LARGE'
+            elif flavor_info['extra_specs']['hw:mem_page_size'] == 'small':
+                getattr(flavor, 'guest_epa').mempage_size = 'SMALL'
+            elif flavor_info['extra_specs']['hw:mem_page_size'] == 2048:
+                getattr(flavor, 'guest_epa').mempage_size = 'SIZE_2MB'
+            elif flavor_info['extra_specs']['hw:mem_page_size'] == 1048576:
+                getattr(flavor, 'guest_epa').mempage_size = 'SIZE_1GB'
+                
+        if 'hw:numa_nodes' in flavor_info['extra_specs']:
+            getattr(flavor,'guest_epa').numa_node_policy.node_cnt = int(flavor_info['extra_specs']['hw:numa_nodes'])
+            for node_id in range(getattr(flavor,'guest_epa').numa_node_policy.node_cnt):
+                numa_node = getattr(flavor,'guest_epa').numa_node_policy.node.add()
+                numa_node.id = node_id
+                if 'hw:numa_cpus.'+str(node_id) in flavor_info['extra_specs']:
+                    numa_node.vcpu = [int(x) for x in flavor_info['extra_specs']['hw:numa_cpus.'+str(node_id)].split(',')]
+                if 'hw:numa_mem.'+str(node_id) in flavor_info['extra_specs']:
+                    numa_node.memory_mb = int(flavor_info['extra_specs']['hw:numa_mem.'+str(node_id)]) 
+
+        if 'hw:numa_mempolicy' in flavor_info['extra_specs']:
+            if flavor_info['extra_specs']['hw:numa_mempolicy'] == 'strict':
+                getattr(flavor,'guest_epa').numa_node_policy.mem_policy = 'STRICT'
+            elif flavor_info['extra_specs']['hw:numa_mempolicy'] == 'preferred':
+                getattr(flavor,'guest_epa').numa_node_policy.mem_policy = 'PREFERRED'
+
+                
+        if 'trust:trusted_host' in flavor_info['extra_specs']:
+            if flavor_info['extra_specs']['trust:trusted_host'] == 'trusted':
+                getattr(flavor,'guest_epa').trusted_execution = True
+            elif flavor_info['extra_specs']['trust:trusted_host'] == 'untrusted':
+                getattr(flavor,'guest_epa').trusted_execution = False
+
+        if 'pci_passthrough:alias' in flavor_info['extra_specs']:
+            device_types = flavor_info['extra_specs']['pci_passthrough:alias']
+            for device in device_types.split(','):
+                dev = getattr(flavor,'guest_epa').pcie_device.add()
+                dev.device_id = device.split(':')[0]
+                dev.count = int(device.split(':')[1])
+
+            
+    @staticmethod
+    def _fill_flavor_info(flavor_info):
+        """Create a GI object from flavor info dictionary
+
+        Converts Flavor information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            flavor_info: Flavor information from openstack
+
+        Returns:
+             Object of class FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name                       = flavor_info['name']
+        flavor.id                         = flavor_info['id']
+        RwcalOpenstackPlugin._fill_epa_attributes(flavor, flavor_info)
+        return flavor
+    
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """Return flavor information.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of flavors
+        """
+        response = RwcalYang.VimResources()
+        flavors = self._get_driver(account).nova_flavor_list()
+        for flv in flavors:
+            response.flavorinfo_list.append(RwcalOpenstackPlugin._fill_flavor_info(flv))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, id):
+        """Return flavor information.
+
+        Arguments:
+            account - a cloud account
+            id - an id for the flavor
+
+        Returns:
+            Flavor info item
+        """
+        flavor = self._get_driver(account).nova_flavor_get(id)
+        return RwcalOpenstackPlugin._fill_flavor_info(flavor)
+
+    
+    def _fill_network_info(self, network_info, account):
+        """Create a GI object from network info dictionary
+
+        Converts Network information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            network_info - Network information from openstack
+            account - a cloud account
+
+        Returns:
+            Network info item
+        """
+        network                  = RwcalYang.NetworkInfoItem()
+        network.network_name     = network_info['name']
+        network.network_id       = network_info['id']
+        if ('provider:network_type' in network_info) and (network_info['provider:network_type'] != None):
+            network.provider_network.overlay_type = network_info['provider:network_type'].upper()
+        if ('provider:segmentation_id' in network_info) and (network_info['provider:segmentation_id']):
+            network.provider_network.segmentation_id = network_info['provider:segmentation_id']
+        if ('provider:physical_network' in network_info) and (network_info['provider:physical_network']):
+            network.provider_network.physical_network = network_info['provider:physical_network'].upper()
+
+        if 'subnets' in network_info and network_info['subnets']:
+            subnet_id = network_info['subnets'][0]
+            subnet = self._get_driver(account).neutron_subnet_get(subnet_id)
+            network.subnet = subnet['cidr']
+        return network
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        """Return a list of networks
+
+        Arguments:
+            account - a cloud account
+        
+        Returns:
+            List of networks
+        """
+        response = RwcalYang.VimResources()
+        networks = self._get_driver(account).neutron_network_list()
+        for network in networks:
+            response.networkinfo_list.append(self._fill_network_info(network, account))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, id):
+        """Return a network
+
+        Arguments:
+            account - a cloud account
+            id - an id for the network
+
+        Returns:
+            Network info item
+        """
+        network = self._get_driver(account).neutron_network_get(id)
+        return self._fill_network_info(network, account)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_network(self, account, network):
+        """Create a new network
+
+        Arguments:
+            account - a cloud account
+            network - Network object
+
+        Returns:
+            Network id
+        """
+        kwargs = {}
+        kwargs['name']            = network.network_name
+        kwargs['admin_state_up']  = True
+        kwargs['external_router'] = False
+        kwargs['shared']          = False
+        
+        if network.has_field('provider_network'):
+            if network.provider_network.has_field('physical_network'):
+                kwargs['physical_network'] = network.provider_network.physical_network
+            if network.provider_network.has_field('overlay_type'):
+                kwargs['network_type'] = network.provider_network.overlay_type.lower()
+            if network.provider_network.has_field('segmentation_id'):
+                kwargs['segmentation_id'] = network.provider_network.segmentation_id
+            
+        network_id = self._get_driver(account).neutron_network_create(**kwargs)
+        self._get_driver(account).neutron_subnet_create(network_id = network_id,
+                                                        cidr = network.subnet)
+        return network_id
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        """Delete a network
+
+        Arguments:
+            account - a cloud account
+            network_id - an id for the network
+        """
+        self._get_driver(account).neutron_network_delete(network_id)
+
+    @staticmethod
+    def _fill_port_info(port_info):
+        """Create a GI object from port info dictionary
+
+        Converts Port information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            port_info - Port information from openstack
+
+        Returns:
+            Port info item
+        """
+        port = RwcalYang.PortInfoItem()
+
+        port.port_name  = port_info['name']
+        port.port_id    = port_info['id']
+        port.network_id = port_info['network_id']
+        port.port_state = port_info['status']
+        if 'device_id' in port_info:
+            port.vm_id = port_info['device_id']
+        if 'fixed_ips' in port_info:
+            port.ip_address = port_info['fixed_ips'][0]['ip_address']
+        return port
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        """Return a port
+
+        Arguments:
+            account - a cloud account
+            port_id - an id for the port
+
+        Returns:
+            Port info item
+        """
+        port = self._get_driver(account).neutron_port_get(port_id)
+        return RwcalOpenstackPlugin._fill_port_info(port)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        """Return a list of ports
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            Port info list
+        """
+        response = RwcalYang.VimResources()
+        ports = self._get_driver(account).neutron_port_list()
+        for port in ports:
+            response.portinfo_list.append(RwcalOpenstackPlugin._fill_port_info(port))
+        return response
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_port(self, account, port):
+        """Create a new port
+
+        Arguments:
+            account - a cloud account
+            port - port object
+
+        Returns:
+            Port id
+        """
+        kwargs = {}
+        kwargs['name'] = port.port_name
+        kwargs['network_id'] = port.network_id
+        kwargs['admin_state_up'] = True
+        if port.has_field('vm_id'):
+            kwargs['vm_id'] = port.vm_id
+        if port.has_field('port_type'):
+            kwargs['port_type'] = port.port_type
+        else:
+            kwargs['port_type'] = "normal"
+
+        return self._get_driver(account).neutron_port_create(**kwargs)
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        """Delete a port
+
+        Arguments:
+            account - a cloud account
+            port_id - an id for port
+        """
+        self._get_driver(account).neutron_port_delete(port_id)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_add_host(self, account, host):
+        """Add a new host
+
+        Arguments:
+            account - a cloud account
+            host - a host object
+
+        Returns:
+            An id for the host
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        """Remove a host
+
+        Arguments:
+            account - a cloud account
+            host_id - an id for the host
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        """Return a host
+
+        Arguments:
+            account - a cloud account
+            host_id - an id for host
+
+        Returns:
+            Host info item
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        """Return a list of hosts
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of hosts
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def _fill_connection_point_info(c_point, port_info):
+        """Create a GI object for RwcalYang.VDUInfoParams_ConnectionPoints()
+
+        Converts Port information dictionary object returned by openstack
+        driver into Protobuf Gi Object  
+
+        Arguments:
+            c_point - RwcalYang.VDUInfoParams_ConnectionPoints() object to fill
+            port_info - Port information from openstack
+        Returns:
+            Protobuf Gi object for RwcalYang.VDUInfoParams_ConnectionPoints
+        """
+        c_point.name = port_info['name']
+        c_point.connection_point_id = port_info['id']
+        if ('fixed_ips' in port_info) and (len(port_info['fixed_ips']) >= 1):
+            if 'ip_address' in port_info['fixed_ips'][0]:
+                c_point.ip_address = port_info['fixed_ips'][0]['ip_address']
+        if port_info['status'] == 'ACTIVE':
+            c_point.state = 'active'
+        else:
+            c_point.state = 'inactive'
+        if 'network_id' in port_info:    
+            c_point.virtual_link_id = port_info['network_id']
+        if ('device_id' in port_info) and (port_info['device_id']):
+            c_point.vdu_id = port_info['device_id']
+        
+    @staticmethod
+    def _fill_virtual_link_info(network_info, port_list, subnet):
+        """Create a GI object for VirtualLinkInfoParams
+
+        Converts Network and Port information dictionary object
+        returned by openstack driver into Protobuf Gi Object  
+
+        Arguments:
+            network_info - Network information from openstack
+            port_list - A list of port information from openstack
+            subnet: Subnet information from openstack
+        Returns:
+            Protobuf Gi object for VirtualLinkInfoParams
+        """
+        link = RwcalYang.VirtualLinkInfoParams()
+        link.name  = network_info['name']
+        if network_info['status'] == 'ACTIVE':
+            link.state = 'active'
+        else:
+            link.state = 'inactive'
+        link.virtual_link_id = network_info['id']
+        for port in port_list:
+            if port['device_owner'] == 'compute:None':
+                c_point = link.connection_points.add()
+                RwcalOpenstackPlugin._fill_connection_point_info(c_point, port)
+
+        if subnet != None:
+            link.subnet = subnet['cidr']
+
+        if ('provider:network_type' in network_info) and (network_info['provider:network_type'] != None):
+            link.provider_network.overlay_type = network_info['provider:network_type'].upper()
+        if ('provider:segmentation_id' in network_info) and (network_info['provider:segmentation_id']):
+            link.provider_network.segmentation_id = network_info['provider:segmentation_id']
+        if ('provider:physical_network' in network_info) and (network_info['provider:physical_network']):
+            link.provider_network.physical_network = network_info['provider:physical_network'].upper()
+
+        return link
+
+    @staticmethod
+    def _fill_vdu_info(vm_info, flavor_info, mgmt_network, port_list):
+        """Create a GI object for VDUInfoParams
+
+        Converts VM information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            vm_info - VM information from openstack
+            flavor_info - VM Flavor information from openstack
+            mgmt_network - Management network
+            port_list - A list of port information from openstack
+        Returns:
+            Protobuf Gi object for VDUInfoParams
+        """
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.name = vm_info['name']
+        vdu.vdu_id = vm_info['id']
+        for network_name, network_info in vm_info['addresses'].items():
+            if network_info and network_name == mgmt_network:
+                for interface in network_info:
+                    if 'OS-EXT-IPS:type' in interface:
+                        if interface['OS-EXT-IPS:type'] == 'fixed':
+                            vdu.management_ip = interface['addr']
+                        elif interface['OS-EXT-IPS:type'] == 'floating':
+                            vdu.public_ip = interface['addr']
+                
+        # Look for any metadata
+        for key, value in vm_info['metadata'].items():
+            if key == 'node_id':
+                vdu.node_id = value
+        if ('image' in vm_info) and ('id' in vm_info['image']):
+            vdu.image_id = vm_info['image']['id']
+        if ('flavor' in vm_info) and ('id' in vm_info['flavor']):
+            vdu.flavor_id = vm_info['flavor']['id']
+
+        if vm_info['status'] == 'ACTIVE':
+            vdu.state = 'active'
+        elif vm_info['status'] == 'ERROR':
+            vdu.state = 'failed'
+        else:
+            vdu.state = 'inactive'
+        # Fill the port information
+        for port in port_list:
+            c_point = vdu.connection_points.add()
+            RwcalOpenstackPlugin._fill_connection_point_info(c_point, port)
+
+        if flavor_info is not None:
+            RwcalOpenstackPlugin._fill_epa_attributes(vdu, flavor_info)
+        return vdu
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the virtual link to create
+
+        Returns:
+            The virtual link id
+        """
+        network = RwcalYang.NetworkInfoItem()
+        network.network_name = link_params.name
+        network.subnet       = link_params.subnet
+
+        if link_params.provider_network:
+            for field in link_params.provider_network.fields:
+                if link_params.provider_network.has_field(field):
+                    setattr(network.provider_network,
+                            field,
+                            getattr(link_params.provider_network, field))
+        net_id = self.do_create_network(account, network, no_rwstatus=True)
+        return net_id
+        
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        """Delete a virtual link
+
+        Arguments:
+            account - a cloud account
+            link_id - id for the virtual-link to be deleted
+
+        Returns:
+            None
+        """
+        if not link_id:
+            logger.error("Empty link_id during the virtual link deletion")
+            raise Exception("Empty link_id during the virtual link deletion")
+
+        port_list = self._get_driver(account).neutron_port_list(**{'network_id': link_id})
+        for port in port_list:
+            if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
+                self.do_delete_port(account, port['id'], no_rwstatus=True)
+        self.do_delete_network(account, link_id, no_rwstatus=True)
+        
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        """Get information about virtual link.
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link 
+
+        Returns:
+            Object of type RwcalYang.VirtualLinkInfoParams
+        """
+        if not link_id:
+            logger.error("Empty link_id during the virtual link get request")
+            raise Exception("Empty link_id during the virtual link get request")
+        
+        drv = self._get_driver(account)
+        network = drv.neutron_network_get(link_id)
+        if network:
+            port_list = drv.neutron_port_list(**{'network_id': network['id']})
+            if 'subnets' in network:
+                subnet = drv.neutron_subnet_get(network['subnets'][0])
+            else:
+                subnet = None
+            virtual_link = RwcalOpenstackPlugin._fill_virtual_link_info(network, port_list, subnet)
+        else:
+            virtual_link = None
+        return virtual_link
+    
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link_list(self, account):
+        """Get information about all the virtual links
+
+        Arguments:
+            account  - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VirtualLinkInfoParams
+        """
+        vnf_resources = RwcalYang.VNFResources()
+        drv = self._get_driver(account)
+        networks = drv.neutron_network_list()
+        for network in networks:
+            port_list = drv.neutron_port_list(**{'network_id': network['id']})
+            if ('subnets' in network) and (network['subnets']):
+                subnet = drv.neutron_subnet_get(network['subnets'][0])
+            else:
+                subnet = None
+            virtual_link = RwcalOpenstackPlugin._fill_virtual_link_info(network, port_list, subnet)
+            vnf_resources.virtual_link_info_list.append(virtual_link)
+        return vnf_resources
+
+    def _create_connection_point(self, account, c_point):
+        """
+        Create a connection point
+        Arguments:
+           account  - a cloud account
+           c_point  - connection_points
+        """
+        port            = RwcalYang.PortInfoItem()
+        port.port_name  = c_point.name
+        port.network_id = c_point.virtual_link_id
+
+        if c_point.type_yang == 'VIRTIO':
+            port.port_type = 'normal'
+        elif c_point.type_yang == 'SR_IOV':
+            port.port_type = 'direct'
+        else:
+            raise NotImplementedError("Port Type: %s not supported" %(c_point.port_type))
+        
+        port_id = self.do_create_port(account, port, no_rwstatus=True)
+        return port_id
+
+    
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        """Create a new virtual deployment unit
+
+        Arguments:
+            account     - a cloud account
+            vdu_init  - information about VDU to create (RwcalYang.VDUInitParams)
+
+        Returns:
+            The vdu_id
+        """
+        ### First create required number of ports aka connection points
+        port_list = []
+        network_list = []
+        drv = self._get_driver(account)
+
+        ### If floating_ip is required and we don't have one, better fail before any further allocation
+        if vdu_init.has_field('allocate_public_address') and vdu_init.allocate_public_address:
+            pool_name = None
+            if account.openstack.has_field('floating_ip_pool'):
+                pool_name = account.openstack.floating_ip_pool
+            floating_ip = drv.nova_floating_ip_create(pool_name)
+        else:
+            floating_ip = None
+        
+        ### Create port in mgmt network
+        port            = RwcalYang.PortInfoItem()
+        port.port_name  = 'mgmt-'+ vdu_init.name
+        port.network_id = drv._mgmt_network_id
+        port.port_type = 'normal'
+        port_id = self.do_create_port(account, port, no_rwstatus=True)
+        port_list.append(port_id)
+
+        
+        for c_point in vdu_init.connection_points:
+            if c_point.virtual_link_id in network_list:
+                assert False, "Only one port per network supported. Refer: http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/nfv-multiple-if-1-net.html"
+            else:
+                network_list.append(c_point.virtual_link_id)
+            port_id = self._create_connection_point(account, c_point)
+            port_list.append(port_id)
+
+        ### Now Create VM
+        vm                     = RwcalYang.VMInfoItem()
+        vm.vm_name             = vdu_init.name
+        vm.flavor_id           = vdu_init.flavor_id
+        vm.image_id            = vdu_init.image_id
+        
+        if vdu_init.has_field('vdu_init') and vdu_init.vdu_init.has_field('userdata'):
+            vm.cloud_init.userdata = vdu_init.vdu_init.userdata
+            
+        vm.user_tags.node_id   = vdu_init.node_id
+
+        for port_id in port_list:
+            port = vm.port_list.add()
+            port.port_id = port_id
+            
+        pci_assignement = self.prepare_vpci_metadata(drv, vdu_init)
+        if pci_assignement != '':
+            vm.user_tags.pci_assignement = pci_assignement
+
+        vm_id = self.do_create_vm(account, vm, no_rwstatus=True)
+        self.prepare_vdu_on_boot(account, vm_id, floating_ip)
+        return vm_id
+
+    def prepare_vpci_metadata(self, drv, vdu_init):
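+        """Build the TEF-specific vPCI assignment metadata string for a VDU.
+
+        Collects the requested vPCI addresses for the management interface and
+        for each connection point, and encodes them as a string of the form
+        {u'<network-id>': [[u'<vpci>', '']], ..., u'VF': [[u'<vpci>', ''], ...]}
+        that is later stored in the VM metadata as 'pci_assignement'.
+        Returns an empty string when no vPCI addresses were requested.
+        """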
+        pci_assignement = ''
+        ### TEF-specific metadata creation for vPCI assignment
+        virtio_vpci = []
+        sriov_vpci = []
+        virtio_meta = ''
+        sriov_meta = ''
+        ### For MGMT interface
+        if vdu_init.has_field('mgmt_vpci'):
+            xx = 'u\''+ drv._mgmt_network_id + '\' :[[u\'' + vdu_init.mgmt_vpci + '\', ' + '\'\']]'
+            virtio_vpci.append(xx)
+
+        for c_point in vdu_init.connection_points:
+            if c_point.has_field('vpci'):
+                if c_point.has_field('vpci') and c_point.type_yang == 'VIRTIO':
+                    xx = 'u\''+c_point.virtual_link_id + '\' :[[u\'' + c_point.vpci + '\', ' + '\'\']]'
+                    virtio_vpci.append(xx)
+                elif c_point.has_field('vpci') and c_point.type_yang == 'SR_IOV':
+                    xx = '[u\'' + c_point.vpci + '\', ' + '\'\']'
+                    sriov_vpci.append(xx)
+                
+        if virtio_vpci:
+            virtio_meta += ','.join(virtio_vpci)
+            
+        if sriov_vpci:
+            sriov_meta = 'u\'VF\': ['
+            sriov_meta += ','.join(sriov_vpci)
+            sriov_meta += ']'
+
+        if virtio_meta != '':
+            pci_assignement +=  virtio_meta
+            pci_assignement += ','
+            
+        if sriov_meta != '':
+            pci_assignement +=  sriov_meta
+            
+        if pci_assignement != '':
+            pci_assignement = '{' + pci_assignement + '}'
+            
+        return pci_assignement
+    
+
+        
+    def prepare_vdu_on_boot(self, account, server_id, floating_ip):
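+        """Run the prepare_vm helper command for a newly created server.
+
+        Formats PREPARE_VM_CMD with the account credentials and the server id,
+        appends the floating IP when one was allocated, and executes the
+        resulting command (resolved relative to the openstack_drv package
+        directory) in a shell.
+        """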
+        cmd = PREPARE_VM_CMD.format(auth_url     = account.openstack.auth_url,
+                                    username     = account.openstack.key,
+                                    password     = account.openstack.secret,
+                                    tenant_name  = account.openstack.tenant,
+                                    mgmt_network = account.openstack.mgmt_network,
+                                    server_id    = server_id)
+        
+        if floating_ip is not None:
+            cmd += (" --floating_ip "+ floating_ip.ip)
+
+        exec_path = 'python3 ' + os.path.dirname(openstack_drv.__file__)
+        exec_cmd = exec_path+'/'+cmd
+        logger.info("Running command: %s" %(exec_cmd))
+        subprocess.call(exec_cmd, shell=True)
+        
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Modify Properties of existing virtual deployment unit
+
+        Arguments:
+            account     -  a cloud account
+            vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
+        """
+        ### First create required number of ports aka connection points
+        port_list = []
+        network_list = []
+        for c_point in vdu_modify.connection_points_add:
+            if c_point.virtual_link_id in network_list:
+                assert False, "Only one port per network supported. Refer: http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/nfv-multiple-if-1-net.html"
+            else:
+                network_list.append(c_point.virtual_link_id)
+            port_id = self._create_connection_point(account, c_point)
+            port_list.append(port_id)
+
+        ### Now add the ports to VM
+        for port_id in port_list:
+            self._get_driver(account).nova_server_add_port(vdu_modify.vdu_id, port_id)
+
+        ### Delete the requested connection_points
+        for c_point in vdu_modify.connection_points_remove:
+            self.do_delete_port(account, c_point.connection_point_id, no_rwstatus=True)
+
+        if vdu_modify.has_field('image_id'):
+            self._get_driver(account).nova_server_rebuild(vdu_modify.vdu_id, vdu_modify.image_id)
+    
+        
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        """Delete a virtual deployment unit
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu to be deleted
+
+        Returns:
+            None
+        """
+        if not vdu_id:
+            logger.error("empty vdu_id during the vdu deletion")
+            return
+        drv = self._get_driver(account)
+
+        ### Get list of floating_ips associated with this instance and delete them
+        floating_ips = [ f for f in drv.nova_floating_ip_list() if f.instance_id == vdu_id ]
+        for f in floating_ips:
+            drv.nova_drv.floating_ip_delete(f)
+
+        ### Get list of port on VM and delete them.
+        port_list = drv.neutron_port_list(**{'device_id': vdu_id})
+        for port in port_list:
+            if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
+                self.do_delete_port(account, port['id'], no_rwstatus=True)
+        self.do_delete_vm(account, vdu_id, no_rwstatus=True)
+                
+    
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        """Get information about a virtual deployment unit.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu
+
+        Returns:
+            Object of type RwcalYang.VDUInfoParams
+        """
+        drv = self._get_driver(account)
+
+        ### Get list of ports excluding the one for management network
+        port_list = [p for p in drv.neutron_port_list(**{'device_id': vdu_id}) if p['network_id'] != drv.get_mgmt_network_id()]
+
+        vm = drv.nova_server_get(vdu_id)
+
+        flavor_info = None
+        if ('flavor' in vm) and ('id' in vm['flavor']):
+            try:
+                flavor_info = drv.nova_flavor_get(vm['flavor']['id'])
+            except Exception as e:
+                logger.critical("Exception encountered while attempting to get flavor info for flavor_id: %s. Exception: %s" %(vm['flavor']['id'], str(e)))
+                
+        return RwcalOpenstackPlugin._fill_vdu_info(vm,
+                                                   flavor_info,
+                                                   account.openstack.mgmt_network,
+                                                   port_list)
+        
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu_list(self, account):
+        """Get information about all the virtual deployment units
+
+        Arguments:
+            account     - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VDUInfoParams
+        """
+        vnf_resources = RwcalYang.VNFResources()
+        drv = self._get_driver(account)
+        vms = drv.nova_server_list()
+        for vm in vms:
+            ### Get list of ports excluding one for management network
+            port_list = [p for p in drv.neutron_port_list(**{'device_id': vm['id']}) if p['network_id'] != drv.get_mgmt_network_id()]
+
+            flavor_info = None
+
+            if ('flavor' in vm) and ('id' in vm['flavor']):
+                try:
+                    flavor_info = drv.nova_flavor_get(vm['flavor']['id'])
+                except Exception as e:
+                    logger.critical("Exception encountered while attempting to get flavor info for flavor_id: %s. Exception: %s" %(vm['flavor']['id'], str(e)))
+            vdu = RwcalOpenstackPlugin._fill_vdu_info(vm,
+                                                      flavor_info,
+                                                      account.openstack.mgmt_network,
+                                                      port_list)
+            vnf_resources.vdu_info_list.append(vdu)
+        return vnf_resources
+    
+    
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_vsphere/CMakeLists.txt b/modules/core/rwvx/rwcal/plugins/vala/rwcal_vsphere/CMakeLists.txt
new file mode 100644 (file)
index 0000000..775586d
--- /dev/null
@@ -0,0 +1,8 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwcal_vsphere rwcal_vsphere.py)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_vsphere/Makefile b/modules/core/rwvx/rwcal/plugins/vala/rwcal_vsphere/Makefile
new file mode 100644 (file)
index 0000000..345c5f3
--- /dev/null
@@ -0,0 +1,24 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_vsphere/rift/vsphere/vsphere.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_vsphere/rift/vsphere/vsphere.py
new file mode 100644 (file)
index 0000000..774b536
--- /dev/null
@@ -0,0 +1,74 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import libcloud.compute.providers
+import libcloud.compute.types
+from libcloud.compute.base import Node
+
+from gi.repository import RwcalYang
+
+
+from . import core
+
+
+class Vsphere(core.Cloud):
+    """This class implements the abstract methods in the Cloud class.
+    This is the Vsphere CAL driver."""
+
+    def __init__(self):
+        super(Vsphere, self).__init__()
+        self._driver_class = libcloud.compute.providers.get_driver(
+                libcloud.compute.providers.Provider.VSPHERE)
+
+    def driver(self, account):
+        return self._driver_class(
+                username=account.username,
+                password=account.password,
+                url=account.url,  # 'url' field on the account object is assumed here
+                )
+
+    def get_image_list(self, account):
+        """
+        Return a list of the names of all available images.
+        """
+        images = self.driver(account).list_images()
+        return [image.name for image in images]
+
+    def create_vm(self, account, vminfo):
+        """
+        Create a new virtual machine.
+
+        @param account  - account information used to authenticate the creation
+                          of the virtual machine
+        @param vminfo   - information about the virtual machine to create
+
+        """
+        node = self.driver(account).ex_create_node_from_template(
+                name=vminfo.vm_name,
+                template=vminfo.vsphere.template,
+                )
+
+        vminfo.vm_id = node.id
+
+        return node.id
+
+    def delete_vm(self, account, vm_id):
+        """
+        delete a virtual machine.
+
+        @param vm_id     - Instance id of VM to be deleted.
+        """
+        # Build a minimal Node handle for the libcloud driver call
+        driver = self.driver(account)
+        node = Node(id=vm_id, name=vm_id, state=None,
+                    public_ips=[], private_ips=[], driver=driver)
+        driver.destroy_node(node)
+
+    def reboot_vm(self, account, vm_id):
+        """
+        Reboot a virtual machine.
+
+        @param vm_id     - Instance id of VM to be rebooted.
+        """
+        # Build a minimal Node handle for the libcloud driver call
+        driver = self.driver(account)
+        node = Node(id=vm_id, name=vm_id, state=None,
+                    public_ips=[], private_ips=[], driver=driver)
+        driver.reboot_node(node)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_vsphere/rwcal_vsphere.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_vsphere/rwcal_vsphere.py
new file mode 100644 (file)
index 0000000..698af19
--- /dev/null
@@ -0,0 +1,222 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import logging
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.vsphere')
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+class ImageLocationError(Exception):
+    pass
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class RwcalVspherePlugin(GObject.Object, RwCal.Cloud):
+    """This class implements the CAL VALA methods for Vsphere.
+    """
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rwcal.vsphere",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+            
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        raise NotImplementedError()
+
+    
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        raise NotImplementedError()
+    
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        raise NotImplementedError()        
+            
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        raise NotImplementedError()
+    
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        raise NotImplementedError()        
+    
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[""])
+    def do_get_virtual_link_list(self, account):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        raise NotImplementedError()            
+    
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        raise NotImplementedError()
+    
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_vdu_list(self, account):
+        raise NotImplementedError()        
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_zk/CMakeLists.txt b/modules/core/rwvx/rwcal/plugins/vala/rwcal_zk/CMakeLists.txt
new file mode 100644 (file)
index 0000000..78529f3
--- /dev/null
@@ -0,0 +1,8 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwcal_zk rwcal_zk.py)
diff --git a/modules/core/rwvx/rwcal/plugins/vala/rwcal_zk/rwcal_zk.py b/modules/core/rwvx/rwcal/plugins/vala/rwcal_zk/rwcal_zk.py
new file mode 100644 (file)
index 0000000..a1cfcbb
--- /dev/null
@@ -0,0 +1,163 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import logging
+
+import kazoo.exceptions
+
+import gi
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwTypes', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes)
+
+import rw_status
+import rwlogger
+
+import rift.cal
+import rift.cal.rwzk
+
+logger = logging.getLogger('rwcal')
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+                IndexError: RwTypes.RwStatus.NOTFOUND,
+                KeyError: RwTypes.RwStatus.NOTFOUND,
+
+                kazoo.exceptions.NodeExistsError: RwTypes.RwStatus.EXISTS,
+                kazoo.exceptions.NoNodeError: RwTypes.RwStatus.NOTFOUND,
+                kazoo.exceptions.NotEmptyError: RwTypes.RwStatus.NOTEMPTY,
+                kazoo.exceptions.LockTimeout: RwTypes.RwStatus.TIMEOUT,
+
+                rift.cal.rwzk.UnlockedError: RwTypes.RwStatus.NOTCONNECTED,
+           })
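+# The rwstatus decorator built above wraps each plugin method so that a raised
+# exception listed in the map is converted into the corresponding
+# RwTypes.RwStatus return code (see main() at the bottom for a small demo).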
+
+class ZookeeperPlugin(GObject.Object, RwCal.Zookeeper):
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._cli = None
+
+    @rwstatus
+    def do_create_server_config(self, zkid, unique_ports, server_names):
+        rift.cal.rwzk.create_server_config(zkid, unique_ports, server_names)
+
+    @rwstatus
+    def do_server_start(self, zkid):
+        rift.cal.rwzk.server_start(zkid)
+
+    @rwstatus
+    def do_kazoo_init(self, unique_ports, server_names):
+        if self._cli is not None:
+            if isinstance(self._cli, rift.cal.rwzk.Kazoo):
+                return
+            else:
+                raise AttributeError('Zookeeper client was already initialized')
+
+        self._cli = rift.cal.rwzk.Kazoo()
+        self._cli.client_init(unique_ports, server_names)
+
+    @rwstatus
+    def do_zake_init(self):
+        if self._cli is not None:
+            if isinstance(self._cli, rift.cal.rwzk.Zake):
+                return
+            else:
+                raise AttributeError('Zookeeper client was already initialized')
+
+        self._cli = rift.cal.rwzk.Zake()
+        self._cli.client_init('', '')
+
+    @rwstatus
+    def do_lock(self, path, timeout):
+        if timeout == 0.0:
+            timeout = None
+
+        self._cli.lock_node(path, timeout)
+
+    @rwstatus
+    def do_unlock(self, path):
+        self._cli.unlock_node(path)
+
+    def do_locked(self, path):
+        try:
+            return self._cli.locked(path)
+        except kazoo.exceptions.NoNodeError:
+            # A node that doesn't exist can't really be locked.
+            return False
+
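+    # For do_create/do_get/do_set/do_children/do_rm below, supplying a closure
+    # selects the asynchronous path: the corresponding a*-method of the rwzk
+    # client is used and closure.callback fires when the operation completes.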
+    @rwstatus
+    def do_create(self, path, closure=None):
+        if not closure:
+            self._cli.create_node(path)
+        else:
+            self._cli.acreate_node(path, closure.callback)
+
+    def do_exists(self, path):
+        return self._cli.exists(path)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get(self, path, closure=None):
+        if not closure:
+            data = self._cli.get_node_data(path)
+            return data.decode()
+        self._cli.aget_node_data(path, closure.store_data, closure.callback)
+        return 0
+
+
+    @rwstatus
+    def do_set(self, path, data, closure=None):
+        if not closure:
+            self._cli.set_node_data(path, data.encode(), None)
+        else:
+            self._cli.aset_node_data(path, data.encode(), closure.callback)
+
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_children(self, path, closure=None):
+        if not closure:
+            return self._cli.get_node_children(path)
+        self._cli.aget_node_children(path, closure.store_data, closure.callback)
+        return 0
+
+    @rwstatus
+    def do_rm(self, path, closure=None):
+        if not closure:
+            self._cli.delete_node(path)
+        else:
+            self._cli.adelete_node(path, closure.callback)
+
+    @rwstatus
+    def do_register_watcher(self, path, closure):
+        self._cli.register_watcher(path, closure.callback)
+
+    @rwstatus
+    def do_unregister_watcher(self, path, closure):
+        self._cli.unregister_watcher(path, closure.callback)
+
+
+
+
+def main():
+    @rwstatus
+    def blah():
+        raise IndexError()
+
+    a = blah()
+    assert(a == RwTypes.RwStatus.NOTFOUND)
+
+    @rwstatus({IndexError: RwTypes.RwStatus.NOTCONNECTED})
+    def blah2():
+        """Some function"""
+        raise IndexError()
+
+    a = blah2()
+    assert(a == RwTypes.RwStatus.NOTCONNECTED)
+    assert(blah2.__doc__ == "Some function")
+
+if __name__ == '__main__':
+    main()
+
diff --git a/modules/core/rwvx/rwcal/plugins/yang/CMakeLists.txt b/modules/core/rwvx/rwcal/plugins/yang/CMakeLists.txt
new file mode 100644 (file)
index 0000000..f242298
--- /dev/null
@@ -0,0 +1,25 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# 
+
+##
+# Parse the yang files
+##
+
+include(rift_yang)
+include(rift_cmdargs)
+
+set(source_yang_files rwcal.yang)
+
+rift_add_yang_target(
+  TARGET rwcal_yang
+  YANG_FILES ${source_yang_files}
+  COMPONENT ${PKG_LONG_NAME}
+  LIBRARIES
+    rwschema_yang_gen
+    rwyang
+    rwlog
+    rwlog-mgmt_yang_gen
+)
+
diff --git a/modules/core/rwvx/rwcal/plugins/yang/Makefile b/modules/core/rwvx/rwcal/plugins/yang/Makefile
new file mode 100644 (file)
index 0000000..345c5f3
--- /dev/null
@@ -0,0 +1,24 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/rwvx/rwcal/plugins/yang/rwcal.yang b/modules/core/rwvx/rwcal/plugins/yang/rwcal.yang
new file mode 100755 (executable)
index 0000000..be50dba
--- /dev/null
@@ -0,0 +1,1133 @@
+
+/*
+ * 
+ * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+ *
+ *
+ */
+
+module rwcal
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rwcal";
+  prefix "rwcal";
+
+  import rw-base {
+    prefix rwbase;
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-log {
+    prefix "rwlog";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2014-12-30 {
+    description
+        "Initial revision.";
+    reference
+        "RIFT RWCAL cloud data";
+  }
+
+  typedef cloud-account-type {
+    description "cloud account type";
+    type enumeration {
+      enum aws;
+      enum cloudsim;
+      enum cloudsim_proxy;
+      enum mock;
+      enum openmano;
+      enum openstack;
+      enum vsphere;
+    }
+  }
+
+  typedef connection-status {
+    description "Connection status for the cloud account";
+    type enumeration {
+      enum unknown;
+      enum validating;
+      enum success;
+      enum failure;
+    }
+  }
+
+  grouping connection-status {
+    container connection-status {
+      config false;
+      rwpb:msg-new CloudConnectionStatus;
+      leaf status {
+        type connection-status;
+      }
+      leaf details {
+        type string;
+      }
+    }
+  }
+
+  uses connection-status;
+
+  typedef sdn-account-type {
+    description "SDN account type";
+    type enumeration {
+      enum odl;
+      enum mock;
+      enum sdnsim;
+    }
+  }
+
+  grouping sdn-provider-auth {
+    leaf account-type {
+      type sdn-account-type;
+    }
+
+    choice provider-specific-info {
+      container odl {
+        leaf username {
+          type string {
+            length "1..255";
+          }
+        }
+
+        leaf password {
+          type string {
+            length "1..32";
+          }
+        }
+
+        leaf url {
+          type string {
+            length "1..255";
+          }
+        }
+      }
+      container mock {
+        leaf username {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwsdn_mock";
+        }
+      }
+
+      container sdnsim {
+        leaf username {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwsdn_sim";
+        }
+      }
+    }
+  }
+
+  grouping provider-auth {
+    leaf account-type {
+      type cloud-account-type;
+    }
+
+    choice provider-specific-info {
+      container mock {
+        leaf username {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwcal_mock";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+      }
+      container aws {
+        leaf key {
+          type string;
+        }
+
+        leaf secret {
+          type string;
+        }
+
+        leaf region {
+          type string;
+        }
+        leaf vpcid {
+          description "VPC ID to use to instantiate EC2 instances";
+          type string;
+        }
+        leaf ssh-key {
+          description "Key pair name to connect to EC2 instance";
+          type string;
+        }
+        leaf availability-zone {
+          description "Availability zone where EC2 instance should
+              be started";
+          type string;
+        }
+        leaf default-subnet-id {
+          description "Default subnet ID to create network
+              interface at instance creation time";
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwcal_aws";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+      }
+
+      container openstack {
+        leaf key {
+          type string;
+          mandatory true;
+        }
+
+        leaf secret {
+          type string;
+          mandatory true;
+        }
+
+        leaf auth_url {
+          type string;
+          mandatory true;
+        }
+
+        leaf tenant {
+          type string;
+          mandatory true;
+        }
+
+        leaf admin {
+          type boolean;
+          default false;
+        }
+
+        leaf mgmt-network {
+          type string;
+          mandatory true;
+        }
+
+        leaf plugin-name {
+          type string;
+          default "rwcal_openstack";
+        }
+
+        leaf-list security-groups {
+          type string;
+          description "Names of the security groups for the VM";
+        }
+        
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+
+        leaf floating-ip-pool {
+          type string;
+          description "Name of floating IP pool to use for floating IP address assignement";
+        }
+        
+      }
+
+      container openmano {
+        leaf host {
+          type string;
+          default "localhost";
+        }
+
+        leaf port {
+          type uint16;
+          default 9090;
+        }
+
+        leaf tenant-id {
+          type string {
+            length "36";
+          }
+          mandatory true;
+        }
+
+        leaf plugin-name {
+          type string;
+          default "rwcal_openmano";
+        }
+      }
+
+      container vsphere {
+        leaf username {
+          type string;
+        }
+
+        leaf password {
+          type string;
+        }
+
+        leaf url {
+          type string;
+        }
+
+        leaf plugin-name {
+          type string;
+          default "rwcal-python";
+        }
+
+        leaf dynamic-flavor-support {
+          type boolean;
+          default false;
+        }
+      }
+
+      container cloudsim {
+        leaf username {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwcal_cloudsim";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+      }
+
+      container cloudsim_proxy {
+        leaf username {
+          type string;
+        }
+        leaf host {
+          type string;
+          default "localhost";
+        }
+        leaf plugin-name {
+          type string;
+          default "rwcal_cloudsimproxy";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+      }
+    }
+  }
+
+  grouping vm-info-item {
+    leaf vm-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf vm-size {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf vm-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf flavor-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf state {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf availability-zone {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf tenant-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf host-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf management-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf public-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    list private-ip-list {
+      key "ip-address";
+
+      leaf ip-address {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list public-ip-list {
+      key "ip-address";
+
+      leaf ip-address {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list port-list {
+      key "port-id";
+      leaf port-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list network-list {
+      key "network-id";
+      leaf network-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    container cloud-init {
+      leaf userdata {
+        description
+            "The userdata field for cloud-init should contain
+             the contents of the script that cloud-init should
+             invoke when configuring the system. Note that this
+             script is expected to be in the cloud-config format";
+        type string;
+      }
+    }
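+
+    /*
+     * Illustrative userdata value (cloud-config format, contents hypothetical):
+     *   #cloud-config
+     *   runcmd:
+     *     - echo "configured by cloud-init" > /tmp/rwcal-example
+     */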
+
+    container user_tags {
+
+      leaf node-id {
+        type string;
+      }
+
+      leaf pci_assignement {
+        type string;
+      }
+
+      leaf tag1 {
+        type string;
+      }
+    }
+  }
+
+  grouping image-info-item {
+    leaf id {
+      type string;
+    }
+
+    leaf name {
+      type string;
+    }
+
+    leaf location {
+      description "Image URL location";
+      type string;
+    }
+
+    leaf checksum {
+      type string;
+    }
+
+    leaf virtual_size_mbytes {
+      description "Virtual size of the image";
+      type uint64;
+    }
+
+    leaf disk_format {
+      description "Format of the Disk";
+      type enumeration {
+        enum ami;
+        enum ari;
+        enum aki;
+        enum vhd;
+        enum vmdk;
+        enum raw;
+        enum qcow2;
+        enum vdi;
+        enum iso;
+      }
+      default "qcow2";
+    }
+
+    leaf container_format {
+      description "Format of the container";
+      type enumeration{
+        enum ami;
+        enum ari;
+        enum aki;
+        enum bare;
+        enum ovf;
+      }
+      default "bare";
+    }
+
+    leaf state {
+      description "State of the Image object in CAL";
+      type enumeration {
+        enum active;
+        enum inactive;
+        enum failed;
+        enum unknown;
+      }
+      default "unknown";
+    }
+
+    container user-tags {
+      description "User tags associated with Image";
+      leaf checksum {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+  }
+
+  grouping network-info-item {
+    leaf network-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf network-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf subnet {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    uses manotypes:provider-network;
+  }
+
+  grouping port-info-item {
+    leaf port-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf port-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf port-state {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf network-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf ip-address {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf vm-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf port-type {
+      description "Type of the port";
+      type enumeration {
+        enum normal;
+        enum macvtap;
+        enum direct;
+      }
+      default "normal";
+    }
+
+    choice provider-specific-info {
+      container lxc {
+        leaf veth-name {
+          type string;
+        }
+      }
+    }
+  }
+
+  container cloud-accounts {
+    list cloud-account-list {
+      rwpb:msg-new CloudAccount;
+      key "name";
+
+      leaf name {
+        type string;
+      }
+      uses provider-auth;
+    }
+  }
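+
+  /*
+   * Illustrative configuration instance for the list above (all values are
+   * hypothetical), rendered as XML config data:
+   *
+   *   <cloud-accounts>
+   *     <cloud-account-list>
+   *       <name>my-openstack</name>
+   *       <account-type>openstack</account-type>
+   *       <openstack>
+   *         <key>admin</key>
+   *         <secret>secret</secret>
+   *         <auth_url>http://10.66.4.27:5000/v3/</auth_url>
+   *         <tenant>demo</tenant>
+   *         <mgmt-network>private</mgmt-network>
+   *       </openstack>
+   *     </cloud-account-list>
+   *   </cloud-accounts>
+   */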
+
+  container vim-resources {
+    rwpb:msg-new VimResources;
+    config false;
+
+    list vminfo-list {
+      rwpb:msg-new VMInfoItem;
+      config false;
+      key "vm-id";
+
+      uses vm-info-item;
+    }
+
+    list imageinfo-list {
+      rwpb:msg-new ImageInfoItem;
+      config false;
+      key "id";
+
+      uses image-info-item;
+    }
+
+    list tenantinfo-list {
+      rwpb:msg-new TenantInfoItem;
+      config false;
+      key "tenant-id";
+
+      leaf tenant-name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf tenant-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list userinfo-list {
+      rwpb:msg-new UserInfoItem;
+      config false;
+      key "user-id";
+
+      leaf user-name{
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf user-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list roleinfo-list {
+      rwpb:msg-new RoleInfoItem;
+      config false;
+      key "role-id";
+
+      leaf role-name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf role-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list hostinfo-list {
+      rwpb:msg-new HostInfoItem;
+      config false;
+      key "host-id";
+
+      leaf host-name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf host-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list networkinfo-list {
+      rwpb:msg-new NetworkInfoItem;
+      config false;
+      key "network-id";
+
+      uses network-info-item;
+    }
+
+    list portinfo-list {
+      rwpb:msg-new PortInfoItem;
+      config false;
+      key "port-id";
+
+      uses port-info-item;
+    }
+
+    list flavorinfo-list {
+      rwpb:msg-new FlavorInfoItem;
+      config false;
+      key "id";
+
+      leaf id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      uses manotypes:vm-flavor;
+      uses manotypes:guest-epa;
+      uses manotypes:vswitch-epa;
+      uses manotypes:hypervisor-epa;
+      uses manotypes:host-epa;
+    }
+  }
+
+  grouping virtual-link-create-params {
+    leaf name {
+      description "Name of the Virtual-Link";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf subnet {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+    leaf associate-public-ip {
+      type boolean;
+      default false;
+    }
+    uses manotypes:provider-network;
+  }
+
+
+  container virtual-link-req-params {
+    description "This object defines the parameters required to create a virtual-link";
+    rwpb:msg-new VirtualLinkReqParams;
+    uses virtual-link-create-params;
+  }
+
+
+  grouping connection-point-type {
+    leaf type {
+      description
+          "Specifies the type of connection point
+             VIRTIO          : Use the traditional VIRTIO interface.
+             PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface.
+             SR-IOV          : Use SR-IOV interface.";
+      type enumeration {
+        enum VIRTIO;
+        enum PCI-PASSTHROUGH;
+        enum SR-IOV;
+      }
+      default "VIRTIO";
+    }
+  }
+
+
+  grouping vdu-create-params {
+    leaf name {
+      description "Name of the VDU";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf mgmt-vpci {
+      description
+          "Specifies the virtual PCI address. Expressed in
+           the following format dddd:dd:dd.d. For example
+           0000:00:12.0. This information can be used to
+           pass as metadata during the VM creation.";
+      type string;
+    }
+
+    uses manotypes:vm-flavor;
+    uses manotypes:guest-epa;
+    uses manotypes:vswitch-epa;
+    uses manotypes:hypervisor-epa;
+    uses manotypes:host-epa;
+
+    leaf node-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf flavor-id {
+      description "CAL assigned flavor-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-id {
+      description "CAL assigned image-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-name {
+      description "Image name which can be used to lookup the image-id";
+      type string;
+      rwpb:field-inline "true";
+      rwpb:field-string-max 256;
+    }
+
+    leaf image-checksum {
+      description "Image md5sum checksum used in combination with image name to lookup image-id ";
+      type string;
+      rwpb:field-inline "true";
+      rwpb:field-string-max 32;
+    }
+
+    list connection-points {
+      key "name";
+      leaf name {
+        description "Name of the connection point";
+        type string;
+      }
+      leaf virtual-link-id {
+        description "CAL assigned resource Id for the Virtual Link";
+        type string;
+      }
+      leaf associate-public-ip {
+        type boolean;
+        default false;
+      }
+      leaf vpci {
+        description
+            "Specifies the virtual PCI address. Expressed in
+             the following format dddd:dd:dd.d. For example
+             0000:00:12.0. This information can be used to
+             pass as metadata during the VM creation.";
+        type string;
+      }
+
+      uses connection-point-type;
+    }
+
+    leaf allocate-public-address {
+      description "If this VDU needs public IP address";
+      type boolean;
+      default false;
+    }
+
+    container vdu-init {
+      leaf userdata {
+        description
+            "The userdata field for vdu-init should contain
+             the contents of the script that cloud-init should
+             invoke when configuring the system. Note that this
+             script is expected to be in the cloud-config format";
+        type string;
+      }
+    }
+  }
+
+  container vdu-init-params {
+    description "This object defines the parameters required to create a VDU";
+    rwpb:msg-new VDUInitParams;
+    uses vdu-create-params;
+  }
+
+  container vdu-modify-params {
+    description "This object defines the parameters required to modify VDU";
+    rwpb:msg-new VDUModifyParams;
+
+    leaf vdu-id {
+      description "CAL assigned id for VDU to which this connection point belongs";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-id {
+      description "CAL assigned image-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    list connection-points-add {
+      key "name";
+      leaf name {
+        description "Name of the connection point";
+        type string;
+      }
+      leaf virtual-link-id {
+        description "CAL assigned resource Id for the Virtual Link";
+        type string;
+      }
+      leaf associate-public-ip {
+        type boolean;
+        default false;
+      }
+
+      uses connection-point-type;
+    }
+
+    list connection-points-remove {
+      key "connection-point-id";
+      leaf connection-point-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+  }
+
+  grouping connection-point-info-params {
+    leaf connection-point-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf name {
+      description "Name of the connection point";
+      type string;
+    }
+
+    leaf virtual-link-id {
+      description "CAL assigned resource ID of the Virtual-Link";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf vdu-id {
+      description "CAL assigned id for VDU to which this connection point belongs";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf state {
+      description "CMP agnostic generic state of the connection point";
+      type enumeration {
+        enum active;
+        enum inactive;
+        enum failed;
+        enum unknown;
+      }
+    }
+
+    leaf ip-address {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf public-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+  }
+
+  grouping virtual-link-info-params {
+    leaf name {
+      description "Name of the Virtual-Link";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf state {
+      description "State of the Virtual Link";
+      type enumeration {
+        enum active;
+        enum inactive;
+        enum failed;
+        enum unknown;
+      }
+      default "unknown";
+    }
+
+    leaf virtual-link-id {
+      description "CAL assigned resource ID of the Virtual-Link";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    list connection-points {
+      key connection-point-id;
+      uses connection-point-info-params;
+    }
+
+    leaf subnet {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    uses manotypes:provider-network;
+
+  }
+
+  grouping vdu-info-params {
+    leaf vdu-id {
+      description "CAL assigned id for VDU";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+    leaf name {
+      description "Name of the VDU";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf flavor-id {
+      description "CAL assigned flavor-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-id {
+      description "CAL assigned image-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf node-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf state {
+      description "State of the VDU";
+      type enumeration {
+        enum active;
+        enum inactive;
+        enum failed;
+        enum unknown;
+      }
+      default "unknown";
+    }
+
+    leaf management-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf public-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    uses manotypes:vm-flavor;
+    uses manotypes:guest-epa;
+    uses manotypes:vswitch-epa;
+    uses manotypes:hypervisor-epa;
+    uses manotypes:host-epa;
+
+    list connection-points {
+      key connection-point-id;
+      uses connection-point-info-params;
+    }
+  }
+
+  container vnf-resources {
+    rwpb:msg-new VNFResources;
+    config false;
+
+    list virtual-link-info-list {
+      rwpb:msg-new VirtualLinkInfoParams;
+      config false;
+      key virtual-link-id;
+      uses virtual-link-info-params;
+    }
+
+    list vdu-info-list {
+      rwpb:msg-new VDUInfoParams;
+      config false;
+      key vdu-id;
+      uses vdu-info-params;
+    }
+  }
+}
+
+/* vim: set ts=2:sw=2: */
diff --git a/modules/core/rwvx/rwcal/rift/cal/rwzk.py b/modules/core/rwvx/rwcal/rift/cal/rwzk.py
new file mode 100644 (file)
index 0000000..479ac66
--- /dev/null
@@ -0,0 +1,554 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import abc
+import collections
+import inspect
+import os
+import weakref
+
+import kazoo.client
+import kazoo.exceptions
+import zake.fake_client
+
+class UnlockedError(Exception):
+    pass
+
+def create_server_config(myid, unique_ports, server_names):
+    """
+    This function generates the configuration file needed
+    for the zookeeper server.
+
+    An example configuration is shown below:
+            tickTime=2000
+            dataDir=/var/zookeeper
+            clientPort=2181
+            initLimit=5
+            syncLimit=2
+            server.1=zoo1:2888:3888
+            server.2=zoo2:2888:3888
+            server.3=zoo3:2888:3888
+
+    To test multiple servers on a single machine, specify the servername
+    as localhost with unique quorum & leader election ports
+    (i.e. 2888:3888, 2889:3889, 2890:3890 in the example above) for each
+    server.X in that server's config file.  Of course separate dataDirs
+    and distinct clientPorts are also necessary (in the above replicated
+    example, running on a single localhost, you would still have three
+    config files).
+
+
+    @param myid         - the id of the zookeeper server in the ensemble
+    @param unique_ports - Assign unique ports to zookeeper clients
+    @param server_names - list of server names in the ensemble
+    """
+    install_dir = os.environ.get('INSTALLDIR', '')
+
+    config_fname = "%s/etc/zookeeper/server-%d.cfg" % (install_dir, myid)
+
+    config = "tickTime=2000\n"
+    config += "dataDir=%s/zk/server-%d/data\n" % (install_dir, myid)
+
+    if unique_ports:
+        # Mangle the port ids based on uid,
+        # we need to find a more permanent solution
+        uid = os.getuid()
+        uid = (uid - UID_BASE)%MAX_ENSEMBLE_SIZE
+        config += "clientPort=%d\n" % (uid+2181)
+    else:
+        config += "clientPort=2181\n"
+
+    config += "initLimit=5\n"
+    config += "syncLimit=2\n"
+
+    quorum_port = 2888
+    election_port = 3888
+    for idx, server_name in enumerate(server_names, start=1):
+        if unique_ports:
+            quorum_port += 1
+            election_port += 1
+        config += "server.%d=%s:%d:%d\n" % (idx, server_name, quorum_port, election_port)
+
+    if not os.path.isdir(os.path.dirname(config_fname)):
+        os.makedirs(os.path.dirname(config_fname))
+
+    with open(config_fname, "w") as config_file:
+        config_file.write(config)
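+
+# Usage sketch: create_server_config(1, False, ["zoo1", "zoo2", "zoo3"]) writes
+# $INSTALLDIR/etc/zookeeper/server-1.cfg with the layout shown in the docstring
+# above.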
+
+def to_java_compatible_path(path):
+    if os.name == 'nt':
+        path = path.replace('\\', '/')
+    return path
+
+def server_start(myid):
+    """
+    Start the zookeeper server
+
+    @param myid - the id of the zookeeper server in the ensemble
+    """
+    install_dir = os.environ.get('INSTALLDIR')
+
+    classpath = ':'.join([
+        "/etc/zookeeper",
+        "/usr/share/java/slf4j/slf4j-log4j12.jar",
+        "/usr/share/java/slf4j/slf4j-api.jar",
+        "/usr/share/java/netty.jar",
+        "/usr/share/java/log4j.jar",
+        "/usr/share/java/jline.jar",
+        "/usr/share/java/zookeeper/zookeeper.jar"])
+
+    log4j_path = os.path.join("%s/etc/zookeeper/" % (install_dir), "log4j-%d.properties" % (myid,))
+    import socket
+    with open(log4j_path, "w") as log4j:
+            log4j.write("""
+# DEFAULT: console appender only
+log4j.rootLogger=INFO, ZLOG
+log4j.appender.ZLOG.layout=org.apache.log4j.PatternLayout
+log4j.appender.ZLOG.layout.ConversionPattern=%d{ISO8601} [""" + socket.getfqdn()  +"""] - %-5p [%t:%C{1}@%L] - %m%n
+log4j.appender.ZLOG=org.apache.log4j.RollingFileAppender
+log4j.appender.ZLOG.Threshold=DEBUG
+log4j.appender.ZLOG.File=""" + to_java_compatible_path(  # NOQA
+                "%s/zk/server-%d/log" % (install_dir, myid) + os.sep + "zookeeper.log\n"))
+
+
+    argv = [
+        '/usr/bin/java',
+        '-cp',
+        classpath,
+        #'-Dlog4j.debug',
+        '-Dzookeeper.log.dir="%s"' % (install_dir,),
+        '-Dlog4j.configuration=file:%s' % log4j_path,
+        '-Dcom.sun.management.jmxremote',
+        '-Dcom.sun.management.jmxremote.local.only=false',
+        'org.apache.zookeeper.server.quorum.QuorumPeerMain',
+        '%s/etc/zookeeper/server-%d.cfg' % (install_dir, myid),
+    ]
+
+    print('Running zookeeper: %s' %  (' '.join(argv),))
+
+    pid = os.fork()
+    if pid < 0:
+        raise OSError("Failed to fork")
+    elif pid == 0:
+        # instruct the child process to TERM when the parent dies
+        import ctypes
+        libc = ctypes.CDLL('/lib64/libc.so.6')
+        PR_SET_PDEATHSIG = 1; TERM = 15
+        libc.prctl(PR_SET_PDEATHSIG, TERM)
+
+        os.execv(argv[0], argv)
+
+        # Should never reach here CRASH
+        raise OSError("execv() failed")
+
+class NodeWatcher(object):
+    def __init__(self, zkcli, path):
+        """
+        Create a watcher for the given zookeeper path.  This
+        serves as a single place for interested parties to
+        register callbacks when the given path changes.  This is
+        used instead of the kazoo DataWatcher recipe as:
+            - It stores references to callbacks indefinitely
+            - It does not support methods
+            - It fires twice for every event
+        Obviously, none of that is an issue with -this- object!
+
+        @param zkcli    - zookeeper client
+        @param path     - path to monitor
+        """
+        self._zkcli = zkcli
+        self._path = path
+
+        # Weak references to the registered functions/methods.  This
+        # is so the reference count does not increase leading to the
+        # callbacks persisting only due to their use here.
+        self._funcs = weakref.WeakSet()
+        self._methods = weakref.WeakKeyDictionary()
+        self._calls = []
+
+    @property
+    def registered(self):
+        """
+        The number of callbacks currently registered
+        """
+        return len(self._funcs) + len(self._methods) + len(self._calls)
+
+    def _on_event(self, event):
+        """
+        Handle a zookeeper event on the monitored path.  This
+        will execute every callback once.  If the callback accepts
+        at least two argument (three for methods) then the event type
+        and event path will be passed as the first two arguments.
+        """
+        def call(function, obj=None):
+            # The gi module does a lot of magic.  Just importing gi will not
+            # load the FunctionInfo type.  This seems to be the easiest way
+            # to check without being able to use isinstance.
+            if 'gi.FunctionInfo' in str(type(function)):
+                if len(function.get_arguments()) >= 2:
+                    function(event.type, event.path)
+                else:
+                    function()
+                return
+
+            args, varargs, _, _= inspect.getargspec(function)
+            if obj is not None:
+                if len(args) >= 3 or varargs is not None:
+                    function(obj, event.type, event.path)
+                else:
+                    function(obj)
+            else:
+                if len(args) >= 2 or varargs is not None:
+                    function(event.type, event.path)
+                else:
+                    function()
+
+
+        _ = [call(f) for f in self._funcs]
+
+        for obj, methods in self._methods.items():
+            _ = [call(m, obj=obj) for m in methods]
+
+        _ = [call(f) for f in self._calls]
+
+        if self.registered > 0:
+            _ = self._zkcli.exists(self._path, watch=self._on_event)
+
+    def register(self, slot):
+        """
+        Register the specified method/function as a callback for when
+        the monitored path is changed.
+
+        @param slot - method/function to call when signal is emitted.
+
+        On an event, the slot will be executed with two
+        parameters: the type of event that was generated and the
+        path to the node that changed.
+        """
+        if inspect.ismethod(slot):
+            if not slot.__self__ in self._methods:
+                self._methods[slot.__self__] = set()
+            self._methods[slot.__self__].add(slot.__func__)
+        elif inspect.isfunction(slot):
+            self._funcs.add(slot)
+        elif callable(slot):
+            self._calls.append(slot)
+
+        if self.registered > 0:
+            _ = self._zkcli.exists(self._path, watch=self._on_event)
+
+    def unregister(self, slot):
+        """
+        Unregister the specified method/function.
+
+        @param slot - method/function to unregister.
+        """
+        if inspect.ismethod(slot):
+            if slot.__self__ in self._methods:
+                self._methods[slot.__self__].remove(slot.__func__)
+        elif inspect.isfunction(slot):
+            if slot in self._funcs:
+                self._funcs.remove(slot)
+        elif callable(slot):
+            if slot in self._calls:
+                self._calls.remove(slot)
+
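+# Usage sketch: watcher = NodeWatcher(zkcli, "/some/path"); watcher.register(cb)
+# arranges for cb (optionally accepting the event type and path) to run when the
+# node changes.  ZkClient.register_watcher()/unregister_watcher() below wrap this.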
+
+class ZkClient(object):
+    """Abstract class that all the different implementations of zookeeper
+    clients must implement"""
+    __metaclass__ = abc.ABCMeta
+
+    _instance = None
+    def __new__(cls, *args, **kwargs):
+        if not cls._instance:
+            cls._instance = super(ZkClient, cls).__new__(
+                                cls, *args, **kwargs)
+        return cls._instance
+
+    def __init__(self):
+        self._client_context = None
+        self._locks = {}
+        self._node_watchers = {}
+
+    @abc.abstractmethod
+    def client_init(self, unique_ports, server_names, timeout=120):
+        """
+        Initialize the client connection to the zookeeper
+
+        @param unique_ports - Create unique zookeeper ports based on UID
+        @param server_names - List of zookeeper server names
+
+        Note:
+            It would be really nice to provide this information during __init__()
+        However, the CAL is created, and used, by rwvx well before the manifest
+        which contains the unique_ports and server_names settings has been parsed.
+        """
+        pass
+
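+    # lock_node()/unlock_node() implement a re-entrant, per-path lock on top of
+    # kazoo.recipe.lock.Lock: the underlying lock is acquired once and only
+    # released when every lock_node() call has been matched by an unlock_node().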
+    def lock_node(self, path, timeout=None):
+        """
+        Lock a node for writing.  The lock is reentrant.
+
+        @param path     - path to lock
+        @param timeout  - time, in seconds, to wait for the lock
+        @raises         - kazoo.exceptions.NoNodeError if the path does not exist
+                          UnlockedError if lock acquisition fails
+                          kazoo.exceptions.LockTimeout if a timeout was specified and not met
+        """
+        if not self.exists(path):
+            raise kazoo.exceptions.NoNodeError()
+
+        if path in self._locks:
+            lock, count = self._locks[path]
+            count += 1
+            self._locks[path] = lock, count
+        else:
+            lock = kazoo.recipe.lock.Lock(self._client_context, path)
+            self._locks[path] = lock, 1
+
+        if lock.is_acquired:
+            return
+
+        if not lock.acquire(timeout=timeout):
+            raise UnlockedError()
+        return
+
+    def unlock_node(self, path):
+        """
+        Unlock a node for writing.  If the path is not locked by this process,
+        this is a no-op.
+
+        @param path - path to unlock
+        @raises     - kazoo.exceptions.NoNodeError if the path does not exist
+        """
+        if not self.exists(path):
+            raise kazoo.exceptions.NoNodeError()
+
+        if not path in self._locks:
+            return
+
+        lock, count = self._locks[path]
+        count -= 1
+        self._locks[path] = lock, count 
+        if count:
+            return
+
+        self._locks[path][0].release()
+
+    def locked(self, path):
+        """
+        Test if a path is locked or not.
+
+        @param path - path to test
+        @raises     - kazoo.exceptions.NoNodeError if the path does not exist
+        """
+        if not self.exists(path):
+            raise kazoo.exceptions.NoNodeError()
+       
+        # Always create a new lock object as we cannot tell if we currently
+        # are holding a re-entrant lock
+        lock = kazoo.recipe.lock.Lock(self._client_context, path)
+
+        got_lock = lock.acquire(blocking=False)
+        if got_lock:
+            lock.release()
+
+        return not got_lock
+
+    def my_callback(self, async_obj=None):
+        # Completion handler attached via rawlink() to the kazoo async results
+        # created by the a*-methods below.  Unlike NodeWatcher._on_event there
+        # is no zookeeper watch event available here, so registered callbacks
+        # are invoked with the stored result (if any) as their only argument.
+        def call(function, obj=None):
+            # The gi module does a lot of magic.  Just importing gi will not
+            # load the FunctionInfo type.  This seems to be the easiest way
+            # to check without being able to use isinstance.
+            if 'gi.FunctionInfo' in str(type(function)):
+                if obj is not None:
+                    function(obj)
+                else:
+                    function()
+                return
+
+            if obj is not None:
+                function(obj)
+            else:
+                function()
+
+        if async_obj._store_node_data:
+            get_obj, _ = async_obj.get(timeout=1000)
+            async_obj._get_obj_list.append(get_obj.decode())
+            call(async_obj._store_node_data, async_obj._get_obj_list)
+        if async_obj._store_children:
+            call(async_obj._store_children, async_obj.get(timeout=1000))
+        call(async_obj._callback)
+        async_obj._callback = None
+
+    def acreate_node(self, path, callback, create_ancestors=True):
+        async_obj = self._client_context.create_async(path, makepath=create_ancestors)
+        async_obj._store_data = None
+        async_obj._store_node_data = None
+        async_obj._store_children = False
+        async_obj._callback = callback
+        async_obj.rawlink(self.my_callback)
+
+
+    def adelete_node(self, path, callback, delete_children=False):
+        async_obj = self._client_context.delete_async(path, recursive=delete_children)
+        async_obj._store_data = None
+        async_obj._store_node_data = None
+        async_obj._store_children = False
+        async_obj._callback = callback
+        async_obj.rawlink(self.my_callback)
+
+    def aget_node_children(self, path, store_children, callback):
+        async_obj = self._client_context.get_children_async(path)
+        async_obj._store_data = None
+        async_obj._store_node_data = None
+        async_obj._store_children = store_children
+        async_obj._callback = callback
+        async_obj.rawlink(self.my_callback)
+
+    def aset_node_data(self, path, data, callback):
+        async_obj = self._client_context.set_async(path, data)
+        async_obj._store_data = None
+        async_obj._store_node_data = None
+        async_obj._store_children = False
+        async_obj._callback = callback
+        async_obj.rawlink(self.my_callback)
+
+    def aget_node_data(self, path, store_node_data, callback):
+        async_obj = self._client_context.get_async(path)
+        async_obj._store_node_data = store_node_data
+        async_obj._store_children = False
+        async_obj._callback = callback
+        async_obj._get_obj_list = []
+        async_obj.rawlink(self.my_callback)
+
+    def exists(self, path):
+        return self._client_context.exists(path) is not None
+
+    def create_node(self, path, create_ancestors=True):
+        realpath = self._client_context.create(path, makepath=create_ancestors)
+        return realpath
+
+    def delete_node(self, path, delete_children = False):
+        self._client_context.delete(path, recursive=delete_children)
+
+    def get_node_data(self, path):
+        node_data, _ = self._client_context.get(path)
+        return node_data
+
+    def set_node_data(self, path, data, _):
+        self._client_context.set(path, data)
+
+    def get_node_children(self, path):
+        children = self._client_context.get_children(path)
+        return children
+
+    def register_watcher(self, path, watcher):
+        """
+        Register a function to be called whenever the data
+        contained in the specified path is modified.  Note that
+        this will not fire when the path metadata is changed.
+
+        @param path     - path to node to monitor
+        @param watcher  - function to call when data at the
+                          specified path is modified.
+        """
+        if not path in self._node_watchers:
+            self._node_watchers[path] = NodeWatcher(self._client_context, path)
+
+        self._node_watchers[path].register(watcher)
+
+    def unregister_watcher(self, path, watcher):
+        """
+        Unregister a function that was previously setup to
+        monitor a path with register_watcher().
+
+        @param path     - path to stop watching
+        @param watcher  - function to no longer call when data at
+                          the specified path changes
+        """
+        if not path in self._node_watchers:
+            return
+
+        self._node_watchers[path].unregister(watcher)
+
+    def stop(self):
+        self._client_context.stop()
+        self._client_context.close()
+
+
+class Zake(ZkClient):
+    """This class implements the abstract methods in the ZkClient.
+    ZAKE is pseudo implementation of zookeeper."""
+    _zake_client_context = None
+
+    def client_init(self, _, __):
+        if Zake._zake_client_context is None:
+            # In collapsed mode, this is going to be called multiple times but
+            # we want to make sure we only have a single FakeClient, so the
+            # client_context is a class attribute.
+            Zake._zake_client_context = zake.fake_client.FakeClient()
+            Zake._zake_client_context.start()
+            print("starting ZAKE")
+        self._client_context = Zake._zake_client_context
+
+
+class Kazoo(ZkClient):
+    """This class implements the abstract methods in the ZkClient.
+    Kazoo is a python implementation of zookeeper."""
+
+    @staticmethod
+    def my_listener(state):
+        if state == kazoo.client.KazooState.LOST:
+            # Register somewhere that the session was lost
+            print("Kazoo connection lost")
+        elif state == kazoo.client.KazooState.SUSPENDED:
+            # Handle being disconnected from Zookeeper
+            print("Kazoo connection suspended")
+        else:
+            # Handle being connected/reconnected to Zookeeper
+            print("Kazoo connection established")
+
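+    # client_init() builds a comma-separated connection string from the given
+    # server names; e.g. ["zoo1", "zoo2"] with unique_ports=False connects to
+    # "zoo1:2181,zoo2:2181".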
+    def client_init(self, unique_ports, server_names, timeout=120):
+        # Mangle the port ids based on uid,
+        # we need to find a more permanent solution
+        if unique_ports:
+            uid = os.getuid()
+            uid = (uid - UID_BASE)%MAX_ENSEMBLE_SIZE
+            port_base = uid+2181
+        else:
+            port_base = 2181
+
+        first = 1
+        for server_name in server_names:
+            if first:
+                connection_str = ("%s:%d" % (server_name, port_base))
+                first = 0
+            else:
+                connection_str += (",%s:%d" % (server_name, port_base))
+
+            if unique_ports:
+                port_base += 1
+
+        print("KazooClient connecting to %s" % (connection_str))
+        self._client_context = kazoo.client.KazooClient(connection_str, timeout=timeout)
+        self._client_context.add_listener(self.my_listener)
+        self._client_context.start(timeout)
+
+# vim: sw=4 ts=4
+
diff --git a/modules/core/rwvx/rwcal/src/CMakeLists.txt b/modules/core/rwvx/rwcal/src/CMakeLists.txt
new file mode 100644 (file)
index 0000000..1d3b58a
--- /dev/null
@@ -0,0 +1,27 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 05/22/2014
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+add_definitions(-std=gnu99)
+
+add_library(rwcal_api SHARED
+  rwcal_py.c
+  rwcal_rwzk.c)
+
+target_link_libraries(rwcal_api PRIVATE
+  rwcal-1.0
+  rwcal_yang_gen
+  rwlib
+  rw_vx_plugin
+  peas-1.0)
+
+add_dependencies(rwcal_api rwmanifest_yang.headers)
+
+install(TARGETS rwcal_api LIBRARY DESTINATION usr/lib COMPONENT ${PKG_LONG_NAME})
+
+install(PROGRAMS rwvim.py DESTINATION usr/bin COMPONENT ${PKG_LONG_NAME})
diff --git a/modules/core/rwvx/rwcal/src/Makefile b/modules/core/rwvx/rwcal/src/Makefile
new file mode 100644 (file)
index 0000000..f68ec52
--- /dev/null
@@ -0,0 +1,24 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 05/22/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/rwvx/rwcal/src/rwcal_py.c b/modules/core/rwvx/rwcal/src/rwcal_py.c
new file mode 100644 (file)
index 0000000..c4d7834
--- /dev/null
@@ -0,0 +1,101 @@
+
+/*
+ * 
+ * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+ *
+ *
+ */
+
+#include <libpeas/peas.h>
+
+#include "rwcal-api.h"
+
+rwcal_module_ptr_t rwcal_module_alloc()
+{
+  rw_status_t status;
+  rwcal_module_ptr_t rwcal;
+
+  rwcal = (rwcal_module_ptr_t)malloc(sizeof(struct rwcal_module_s));
+  if (!rwcal)
+    return NULL;
+
+  bzero(rwcal, sizeof(struct rwcal_module_s));
+
+  rwcal->framework = rw_vx_framework_alloc();
+  if (!rwcal->framework)
+    goto err;
+
+  rw_vx_require_repository("RwCal", "1.0");
+
+  status = rw_vx_library_open(rwcal->framework, "rwcal_zk", "", &rwcal->mip);
+  if (status != RW_STATUS_SUCCESS)
+    goto err;
+
+  rwcal->zk = peas_engine_create_extension(
+      rwcal->framework->engine,
+      rwcal->mip->modp->plugin_info,
+      RW_CAL_TYPE_ZOOKEEPER,
+      NULL);
+  if (!rwcal->zk)
+    goto err;
+
+  rwcal->zk_cls = RW_CAL_ZOOKEEPER(rwcal->zk);
+  rwcal->zk_iface = RW_CAL_ZOOKEEPER_GET_INTERFACE(rwcal->zk);
+
+  goto done;
+
+err:
+  rwcal_module_free(&rwcal);
+
+done:
+
+  return rwcal;
+}
+
+void rwcal_module_free(rwcal_module_ptr_t * rwcal)
+{
+  if ((*rwcal)->zk)
+    g_object_unref((*rwcal)->zk);
+
+  if ((*rwcal)->cloud)
+    g_object_unref((*rwcal)->cloud);
+
+  if ((*rwcal)->mip)
+    rw_vx_modinst_close((*rwcal)->mip);
+
+  if ((*rwcal)->framework)
+    rw_vx_framework_free((*rwcal)->framework);
+
+  free(*rwcal);
+  *rwcal = NULL;
+
+  return;
+}
+
+rwcal_closure_ptr_t rwcal_closure_alloc(
+    rwcal_module_ptr_t rwcal,
+    rw_status_t (*callback)(rwcal_module_ptr_t, void *, int),
+    void * user_data)
+{
+  rwcal_closure_ptr_t closure = NULL;
+
+  closure = g_object_new(RW_CAL_TYPE_CLOSURE, NULL);
+  if (!closure)
+    goto done;
+
+  closure->m_rwcal = (void *)rwcal;
+  closure->m_callback = (RwCalrwcal_callback)callback;
+  closure->m_user_data = user_data;
+
+done:
+  return closure;
+}
+
+void rwcal_closure_free(rwcal_closure_ptr_t * closure)
+{
+  g_object_unref(*closure);
+  *closure = NULL;
+
+  return;
+}
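+
+/*
+ * Illustrative usage (my_cb and my_data are hypothetical): allocate a closure
+ * around a C callback, hand it to one of the rwcal_rwzk_* calls, then free it.
+ *
+ *   rwcal_closure_ptr_t closure = rwcal_closure_alloc(rwcal, my_cb, my_data);
+ *   rwcal_rwzk_get(rwcal, "/some/path", &data, closure);
+ *   rwcal_closure_free(&closure);
+ */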
+
diff --git a/modules/core/rwvx/rwcal/src/rwcal_rwzk.c b/modules/core/rwvx/rwcal/src/rwcal_rwzk.c
new file mode 100644 (file)
index 0000000..c1910ab
--- /dev/null
@@ -0,0 +1,214 @@
+
+/*
+ * 
+ * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+ *
+ *
+ */
+
+#include <string.h>
+
+#include "rwcal-api.h"
+
+rw_status_t rwcal_rwzk_create_server_config(
+    rwcal_module_ptr_t rwcal,
+    int id,
+    bool unique_ports,
+    const char ** server_names)
+{
+  rw_status_t status;
+
+  status = rwcal->zk_iface->create_server_config(
+      rwcal->zk_cls,
+      id,
+      unique_ports,
+      (gchar **)server_names);
+
+  return status;
+}
+
+rw_status_t rwcal_rwzk_server_start(rwcal_module_ptr_t rwcal, int id)
+{
+  rw_status_t status;
+
+  status = rwcal->zk_iface->server_start(rwcal->zk_cls, id);
+
+  return status;
+}
+
+rw_status_t rwcal_rwzk_kazoo_init(
+    rwcal_module_ptr_t rwcal,
+    bool unique_ports,
+    const char ** server_names)
+{
+  rw_status_t status;
+
+  status = rwcal->zk_iface->kazoo_init(
+      rwcal->zk_cls,
+      unique_ports,
+      (gchar **)server_names);
+
+  return status;
+}
+
+rw_status_t
+rwcal_rwzk_zake_init(rwcal_module_ptr_t rwcal)
+{
+  rw_status_t status;
+
+  status = rwcal->zk_iface->zake_init(rwcal->zk_cls);
+
+  return status;
+}
+
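+/*
+ * Acquire the lock at 'path'. A NULL timeout maps to 0 (do not wait);
+ * otherwise the timeval is converted to fractional seconds for the plugin.
+ */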
+rw_status_t
+rwcal_rwzk_lock(rwcal_module_ptr_t rwcal, const char * path, struct timeval * timeout)
+{
+  rw_status_t status;
+
+  if (timeout)
+    status = rwcal->zk_iface->lock(
+        rwcal->zk_cls,
+        path,
+        timeout->tv_sec + (timeout->tv_usec/1000000.0f));
+  else
+    status = rwcal->zk_iface->lock(rwcal->zk_cls, path, 0);
+
+  return status;
+}
+
+rw_status_t
+rwcal_rwzk_unlock(rwcal_module_ptr_t rwcal, const char * path)
+{
+  rw_status_t status;
+
+  status = rwcal->zk_iface->unlock(rwcal->zk_cls, path);
+
+  return status;
+}
+
+bool
+rwcal_rwzk_locked(rwcal_module_ptr_t rwcal, const char * path)
+{
+  bool ret;
+
+  ret = (bool)rwcal->zk_iface->locked(rwcal->zk_cls, path);
+
+  return ret;
+}
+
+rw_status_t
+rwcal_rwzk_create(rwcal_module_ptr_t rwcal, const char * path,
+                  const rwcal_closure_ptr_t closure)
+{
+  rw_status_t status;
+
+  status = rwcal->zk_iface->create(rwcal->zk_cls, path, closure);
+
+  return status;
+}
+
+bool
+rwcal_rwzk_exists(rwcal_module_ptr_t rwcal, const char * path)
+{
+  bool ret;
+
+  ret = (bool)rwcal->zk_iface->exists(rwcal->zk_cls, path);
+
+  return ret;
+}
+
+rw_status_t rwcal_rwzk_get(
+    rwcal_module_ptr_t rwcal,
+    const char * path,
+    char ** data,
+    const rwcal_closure_ptr_t closure)
+{
+  rw_status_t status;
+
+  *data = NULL;
+
+  status = rwcal->zk_iface->get(rwcal->zk_cls, path, (gchar **)data, closure);
+  if (status != RW_STATUS_SUCCESS && *data) {
+    free(*data);
+    *data = NULL;
+  }
+
+  return status;
+}
+
+rw_status_t rwcal_rwzk_set(
+    rwcal_module_ptr_t rwcal,
+    const char * path,
+    const char * data,
+    const rwcal_closure_ptr_t closure)
+{
+  rw_status_t status;
+
+  status = rwcal->zk_iface->set(rwcal->zk_cls, path, data, closure);
+
+  return status;
+}
+
+rw_status_t rwcal_rwzk_get_children(
+    rwcal_module_ptr_t rwcal,
+    const char * path,
+    char *** children,
+    const rwcal_closure_ptr_t closure)
+{
+  rw_status_t status;
+
+  *children = NULL;
+
+  status = rwcal->zk_iface->children(rwcal->zk_cls, path, children, closure);
+  if (status != RW_STATUS_SUCCESS) {
+    if (*children) {
+      for (int i = 0; (*children)[i]; ++i)
+        free((*children)[i]);
+      free(*children);
+    }
+
+    *children = NULL;
+  }
+
+  return status;
+}
+
+rw_status_t rwcal_rwzk_delete(rwcal_module_ptr_t rwcal, const char * path,
+                              const rwcal_closure_ptr_t closure)
+{
+  rw_status_t status;
+
+  status = rwcal->zk_iface->rm(rwcal->zk_cls, path, closure);
+
+  return status;
+}
+
+rw_status_t rwcal_rwzk_register_watcher(
+    rwcal_module_ptr_t rwcal,
+    const char * path,
+    const rwcal_closure_ptr_t closure)
+{
+  rw_status_t status;
+
+  status = rwcal->zk_iface->register_watcher(rwcal->zk_cls, path, closure);
+
+  return status;
+}
+
+rw_status_t rwcal_rwzk_unregister_watcher(
+    rwcal_module_ptr_t rwcal,
+    const char * path,
+    const rwcal_closure_ptr_t closure)
+{
+  rw_status_t status = RW_STATUS_SUCCESS;
+
+  status = rwcal->zk_iface->unregister_watcher(rwcal->zk_cls, path, closure);
+
+  return status;
+}
+
+void *rwcal_get_userdata_idx(void *userdata, int idx)
+{
+  return ((void *)(((unsigned long *)userdata)[idx]));
+}
diff --git a/modules/core/rwvx/rwcal/src/rwvim.py b/modules/core/rwvx/rwcal/src/rwvim.py
new file mode 100755 (executable)
index 0000000..674473c
--- /dev/null
@@ -0,0 +1,405 @@
+#!/usr/bin/python3
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Anil Gunturu
+# Creation Date: 07/24/2014
+# 
+
+"""
+This is a skeletal python tool that invokes the rwcal plugin
+to perform cloud operations.
+"""
+
+import argparse
+import os
+import socket
+import sys
+import logging
+
+from gi.repository import GObject, Peas, GLib, GIRepository
+from gi.repository import RwCal, RwTypes
+
+def resource_list_subcommand(rwcloud, cmdargs):
+    status, flavorinfo = rwcloud.get_flavor_list()
+    status, vminfo = rwcloud.get_vm_list()
+    if vminfo is None:
+        return
+
+    hosts = {}
+
+    # no yaml module installed for Python3, hack for now
+    if cmdargs.hostinfo_file_name:
+        with open(cmdargs.hostinfo_file_name, 'r') as f:
+            lines = f.readlines()
+
+        host = None
+        for l in lines:
+            l = l.strip()
+
+            if l == 'hosts:':
+                continue
+
+            if l == '-':
+                if host:
+                    hosts[host['name']] = host
+                    #hosts.append(host)
+                host = {}
+                continue
+
+            k,v = l.split(':')
+            host[k.strip()] = v.strip()
+
+    # Collect the unique Top of Rack (TOR) switches
+    tors = set(hosts[vm.host_name]['tor'].lower() for vm in vminfo.vminfo_list)
+
+    print("resources:")
+    for vm in vminfo.vminfo_list:
+        _, __, host_ip_list  = socket.gethostbyaddr(vm.host_name)
+
+        print(" -")
+        print("    name: {}".format(vm.vm_name))
+        print("    osid: {}".format(vm.vm_id))
+        print("    host_name: {}".format(vm.host_name))
+        print("    host_ip: {}".format(host_ip_list[0]))
+        controller, scratch = cmdargs.auth_url[7:].split(':')
+        print("    controller: {}".format(controller))
+        print("    tor: {}".format(hosts[vm.host_name]['tor']))
+        print("    image_name: {}".format(vm.image_name))
+        print("    flavor_name: {}".format(vm.flavor_name))
+        print("    availability_zone: {}".format(vm.availability_zone))
+        print("    private_ip_list: {}".format(
+                sorted(v.ip_address for v in vm.private_ip_list)
+        ))
+        # select the 10.0 network for management ip
+        for p in vm.private_ip_list:
+            if p.ip_address.startswith('10.0.'):
+                print("    ip_address: {}".format(p.ip_address))
+                break;
+
+        print("    public_ip_list: {}".format(
+                [v.ip_address for v in vm.public_ip_list]
+        ))
+        for flavor in flavorinfo.flavorinfo_list:
+            if flavor.name == vm.flavor_name:
+                print("    vcpu: {}".format(flavor.vcpus))
+                print("    ram: {}".format(flavor.memory))
+                print("    disk: {}".format(flavor.disk))
+                print("    host_aggregate_list: {}".format(
+                        [a.name for a in flavor.host_aggregate_list]
+                ))
+                print("    pci_passthrough_device_list: {}".format(
+                        [(d.alias,d.count) for d in flavor.pci_passthrough_device_list]
+                ))
+                # The number of openflow switches this resource connects to is
+                # the number of TOR switches for the pool (for demos)
+                print("    num_openflow_switches: {}".format(len(tors)))
+                # The number of legacy switches are 0 for demos
+                print("    num_legacy_switches: 0")
+                print("    epa_attributes:")
+
+                # HACK: rw_wag* VMs trusted_execution is always TRUE
+                if vm.vm_name.startswith('rw_wag'):
+                    trusted_execution = 'TRUE'
+                else:
+                    trusted_execution = str(flavor.trusted_host_only).upper()
+                print("        trusted_execution: {}".format(trusted_execution))
+                print("        ddio: {}".format(hosts[vm.host_name]['ddio']))
+                print("        cat: {}".format(hosts[vm.host_name]['cat']))
+                print("        ovs_acceleration: {}".format(hosts[vm.host_name]['ovs_acceleration']))
+                print("        mem_page_size: {}".format(flavor.mem_page_size))
+                if flavor.cpu_threads:
+                    print("        cpu_threads: {}".format(flavor.cpu_threads))
+                print("        cpu_pinning_policy: {}".format(flavor.cpu_policy))
+                # print("            numa_policy: {{ node_cnt: {} }}".format(flavor.numa_node_cnt))
+                print("        numa_node_cnt: {}".format(flavor.numa_node_cnt))
+
+                # if any of the PCI passthrough devices are Coleto Creek
+                # set qat to accel
+                qat=False
+                passthrough=False
+                rrc=False
+                for d in flavor.pci_passthrough_device_list:
+                    if 'COLETO' in d.alias:
+                        qat=True
+                        break
+                    elif '10G' in d.alias:
+                        passthrough=True
+                    elif '100G' in d.alias:
+                        passthrough=True
+                        rrc=True
+                # NOTE: The following can break if SRIOV is used,
+                # but SRIOV is not used for demos 1, 2 and 3.
+                # This logic defaults the nic to Niantic
+                # when 100G is not in the device list.
+                if rrc:
+                    print("        nic: RRC")
+                else:
+                    print("        nic: NIANTIC")
+
+                if passthrough or hosts[vm.host_name]['ovs_acceleration'].upper() != 'DISABLED':
+                    print("        dpdk_accelerated: TRUE")
+                else:
+                    print("        dpdk_accelerated: FALSE")
+
+                if passthrough:
+                    print("        pci_passthrough: TRUE")
+                else:
+                    print("        pci_passthrough: FALSE")
+
+                if qat:
+                    print("        quick_assist_policy: MANDATORY")
+                else:
+                    print("        quick_assist_policy: NOACCEL")
+
+                break
+    
+def resource_subcommand(rwcloud, cmdargs):
+    """Process the resources subcommand"""
+
+    if cmdargs.which == 'list':
+        resource_list_subcommand(rwcloud, cmdargs)
+
+def vm_list_subcommand(rwcloud, cmdargs):
+    status, vminfo = rwcloud.get_vm_list()
+    for vm in vminfo.vminfo_list:
+        print(vm)
+
+def vm_show_subcommand(rwcloud, cmdargs):
+    status, vm = rwcloud.get_vm(cmdargs.id)
+    print(vm)
+
+def vm_create_subcommand(cmdargs):
+    pass
+
+def vm_destroy_subcommand(cmdargs):
+    pass
+
+def vm_reboot_subcommand(cmdargs):
+    pass
+
+def vm_start_subcommand(cmdargs):
+    pass
+
+def vm_subcommand(rwcloud, cmdargs):
+    """Process the vm subcommand"""
+
+    if cmdargs.which == 'list':
+        vm_list_subcommand(rwcloud, cmdargs)
+    elif cmdargs.which == 'show':
+        vm_show_subcommand(rwcloud, cmdargs)
+    elif cmdargs.which == 'create':
+        vm_create_subcommand(cmdargs)
+    elif cmdargs.which == 'reboot':
+        vm_reboot_subcommand(cmdargs)
+    elif cmdargs.which == 'start':
+        vm_start_subcommand(cmdargs)
+    elif cmdargs.which == 'destroy':
+        vm_destroy_subcommand(cmdargs)
+
+def flavor_list_subcommand(rwcloud, cmdargs):
+    status, flavorinfo = rwcloud.get_flavor_list()
+    for flavor in flavorinfo.flavorinfo_list:
+        print(flavor)
+
+def flavor_show_subcommand(rwcloud, cmdargs):
+    status, flavor = rwcloud.get_flavor(cmdargs.id)
+    print(flavor)
+
+def flavor_subcommand(rwcloud, cmdargs):
+    """Process the flavor subcommand"""
+
+    if cmdargs.which == 'list':
+        flavor_list_subcommand(rwcloud, cmdargs)
+    elif cmdargs.which == 'show':
+        flavor_show_subcommand(rwcloud, cmdargs)
+
+
+def main(args=sys.argv[1:]):
+    logging.basicConfig(format='RWCAL %(message)s')
+
+    ##
+    # Command line argument specification
+    ##
+    desc="""This tool is used to manage the VMs"""
+    parser = argparse.ArgumentParser(description=desc)
+    subparsers = parser.add_subparsers()
+
+    # ipaddr = socket.gethostbyname(socket.getfqdn())
+    # default_auth_url = 'http://%s:35357/v2.0/tokens' % ipaddr
+    default_auth_url = "http://10.64.1.31:35357/v2.0/tokens"
+
+    parser.add_argument('-t', '--provider-type', dest='provider_type',
+                        type=str, default='OPENSTACK',
+                        help='Cloud provider type (default: %(default)s)')
+    parser.add_argument('-u', '--user-name', dest='user',
+                        type=str, default='demo',
+                        help='User name (default: %(default)s)')
+    parser.add_argument('-p', '--password', dest='passwd',
+                        type=str, default='mypasswd',
+                        help='Password (default: %(default)s)')
+    parser.add_argument('-n', '--tenant-name', dest='tenant',
+                        type=str, default='demo',
+                        help='Tenant name (default: %(default)s)')
+    parser.add_argument('-a', '--auth-url', dest='auth_url',
+                        type=str, default=default_auth_url,
+                        help='Authentication URL (default: %(default)s)')
+
+    ##
+    # Subparser for Resources
+    ##
+    resource_parser = subparsers.add_parser('resource')
+    resource_subparsers = resource_parser.add_subparsers()
+
+    # List resource subparser
+    resource_list_parser = resource_subparsers.add_parser('list')
+    resource_list_parser.set_defaults(which='list')
+    resource_list_parser.add_argument('-f', '--hostinfo-file-name', 
+                                  dest='hostinfo_file_name', 
+                                  required=True,
+                                  type=str,
+                                  help='name of the static yaml file containing host information')
+
+    resource_parser.set_defaults(func=resource_subcommand)
+
+    ##
+    # Subparser for Flavor
+    ##
+    flavor_parser = subparsers.add_parser('flavor')
+    flavor_subparsers = flavor_parser.add_subparsers()
+
+    # List flavor subparser
+    flavor_list_parser = flavor_subparsers.add_parser('list')
+    flavor_list_parser.set_defaults(which='list')
+
+    # Show flavor subparser
+    flavor_show_parser = flavor_subparsers.add_parser('show')
+    flavor_show_parser.add_argument('id', type=str)
+    flavor_show_parser.set_defaults(which='show')
+
+    flavor_parser.set_defaults(func=flavor_subcommand)
+
+    ##
+    # Subparser for VM
+    ##
+    vm_parser = subparsers.add_parser('vm')
+    vm_subparsers = vm_parser.add_subparsers()
+
+    # Create VM subparser
+    vm_create_parser = vm_subparsers.add_parser('create')
+    vm_create_parser.add_argument('-c', '--count',
+                                  type=int, default=1,
+                                  help='The number of VMs to launch '
+                                       '(default: %(default)d)')
+    vm_create_parser.add_argument('-i', '--image',
+                                  default='rwopenstack_vm',
+                                  help='Specify the image for the VM')
+    vm_create_parser.add_argument('-n', '--name',
+                                  help='Specify the name of the VM')
+    vm_create_parser.add_argument('-f', '--flavor',
+                                  help='Specify the flavor for the VM')
+    vm_create_parser.add_argument('-R', '--reserve', dest='reserve_new_vms',
+                                  action='store_true', help='reserve any newly created VMs')
+    vm_create_parser.add_argument('-s', '--single', dest='wait_after_create',
+                                  action='store_true', 
+                                  help='wait for each VM to start before creating the next')
+
+    vm_create_parser.set_defaults(which='create')
+
+    # Reboot VM subparser
+    vm_reboot_parser = vm_subparsers.add_parser('reboot')
+    group = vm_reboot_parser.add_mutually_exclusive_group()
+    group.add_argument('-n', '--vm-name', dest='vm_name',
+                       type=str,
+                       help='Specify the name of the VM')
+    group.add_argument('-a', '--reboot-all',
+                       dest='reboot_all', action='store_true',
+                       help='Reboot all VMs')
+    vm_reboot_parser.add_argument('-s', '--sleep', 
+                                  dest='sleep_time', 
+                                  type=int, default=4, 
+                                  help='time in seconds to sleep between reboots')
+    vm_reboot_parser.set_defaults(which='reboot')
+
+    # Destroy VM subparser
+    vm_destroy_parser = vm_subparsers.add_parser('destroy')
+    group = vm_destroy_parser.add_mutually_exclusive_group()
+    group.add_argument('-n', '--vm-name', dest='vm_name',
+                       type=str,
+                       help='Specify the name of the VM (accepts regular expressions)')
+    group.add_argument('-a', '--destroy-all',
+                       dest='destroy_all', action='store_true',
+                       help='Delete all VMs')
+    group.add_argument('-w', '--wait',
+                       dest='wait', action='store_true',
+                       help='destroy all and wait until all VMs have exited')
+    vm_destroy_parser.set_defaults(which='destroy')
+
+    # List VM subparser
+    vm_list_parser = vm_subparsers.add_parser('list')
+    vm_list_parser.set_defaults(which='list')
+    vm_list_parser.add_argument('-i', '--ips_only', dest='ipsonly',
+                                action='store_true',
+                                help='only list IP addresses')
+
+    # Show vm subparser
+    vm_show_parser = vm_subparsers.add_parser('show')
+    vm_show_parser.add_argument('id', type=str)
+    vm_show_parser.set_defaults(which='show')
+    vm_parser.set_defaults(func=vm_subcommand)
+
+    cmdargs = parser.parse_args(args)
+
+    # Open the peas engine
+    engine = Peas.Engine.get_default()
+
+    # Load our plugin proxy into the g_irepository namespace
+    default = GIRepository.Repository.get_default()
+    GIRepository.Repository.require(default, "RwCal", "1.0", 0)
+
+    # Enable python language loader
+    engine.enable_loader("python3")
+
+    # Set the search path for peas engine,
+    # rift-shell sets the PLUGINDIR and GI_TYPELIB_PATH
+    paths = set([])
+    paths = paths.union(os.environ['PLUGINDIR'].split(":"))
+    for path in paths:
+        engine.add_search_path(path, path)
+
+    # Load the rwcal python plugin and create the extension.
+    info = engine.get_plugin_info("rwcal-plugin")
+    if info is None:
+        print("Error loading rwcal-python plugin")
+        sys.exit(1)
+    engine.load_plugin(info)
+    rwcloud = engine.create_extension(info, RwCal.Cloud, None)
+
+    # For now cloud credentials are hard coded
+    if cmdargs.provider_type == 'OPENSTACK':
+        provider_type = RwCal.CloudType.OPENSTACK_AUTH_URL
+    elif cmdargs.provider_type == 'EC2_US_EAST':
+        provider_type = RwCal.CloudType.EC2_US_EAST
+    elif cmdargs.provider_type == 'VSPHERE':
+        provider_type = RwCal.CloudType.VSPHERE
+    else:
+        sys.exit("Cloud provider %s is NOT supported yet" % cmdargs.provider_type)
+
+
+    if 'RIFT_SHELL' not in os.environ:
+        sys.stderr.write("This tool should be run from inside a rift-shell\n")
+
+    status = rwcloud.init(provider_type, 
+                          cmdargs.user, 
+                          cmdargs.passwd, 
+                          cmdargs.auth_url,
+                          cmdargs.tenant)
+
+    assert status == RwTypes.RwStatus.SUCCESS
+
+    cmdargs.func(rwcloud, cmdargs)
+
+if __name__ == "__main__":
+    main()
+
diff --git a/modules/core/rwvx/rwcal/test/CMakeLists.txt b/modules/core/rwvx/rwcal/test/CMakeLists.txt
new file mode 100644 (file)
index 0000000..36c574d
--- /dev/null
@@ -0,0 +1,66 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Anil Gunturu
+# Creation Date: 06/27/2014
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs cal_module_test)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
+##
+# Unit test target
+##
+
+rift_gtest(unittest_rwcal_zk
+  TEST_SRCS rwcal_zk_gtest.cpp
+  TEST_LIBS
+    rwcal_api
+    rwvcs
+)
+
+# rift_gtest(unittest_rwcal_cloud
+#   TEST_SRCS rwcal_cloud_gtest.cpp
+#   TEST_LIBS
+#     rwcal_api
+#     rwcal_yang_gen
+# )
+
+rift_gtest(unittest_rwcal_callback
+  TEST_SRCS rwcal_callback_gtest.cpp
+  TEST_LIBS
+    rwcal-1.0
+    rwcal_api
+)
+
+##
+# Add the basic plugin python test
+##
+#rift_py3test(openstack_cal_tests
+#  LONG_UNITTEST_TARGET
+#  TEST_ARGS -m pytest --junit-xml=${RIFT_UNITTEST_DIR}/openstack_cal/unittest.xml #${CMAKE_CURRENT_SOURCE_DIR}/test_rwcal_openstack_pytest.py
+#)
+
+
+add_executable(rwcal_dump rwcal_dump.cpp)
+target_link_libraries(rwcal_dump
+  rwcal_api
+  rwlib
+  rwyang
+  rwcal_yang_gen
+  CoreFoundation
+  glib-2.0
+  protobuf-c
+)
+
+# added for 4.0
+install(
+  FILES 
+    RIFT.ware-ready.py 
+    openstack_resources.py
+  DESTINATION usr/bin
+  COMPONENT ${PKG_LONG_NAME}
+)
+
diff --git a/modules/core/rwvx/rwcal/test/RIFT.ware-ready.py b/modules/core/rwvx/rwcal/test/RIFT.ware-ready.py
new file mode 100755 (executable)
index 0000000..f4192c6
--- /dev/null
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+
+import re
+import sys
+from rift.rwcal.openstack.openstack_drv import OpenstackDriver
+
+
+
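+# Sanity-check an OpenStack installation: every public service endpoint must be
+# reachable and must not be a loopback address, and a minimal set of resources
+# (images, flavors, floating IPs, networks, subnets, ports, meters) must exist.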
+def test_openstack(drv):
+       print("checking endpoints")
+       for ep in [ 'compute', 'image', 'network', 'metering' ]: 
+               url = drv.ks_drv.get_service_endpoint(ep, 'publicURL')
+               print("%s: %s" % ( ep, url))
+               if re.search('127.0.0', url):
+                       raise Exception("endpoint %s is using a loopback URL: %s" % ( ep, url))
+
+       def verify(name, min, count):
+               if count < min:
+                       raise Exception("only %d instances of %s found. Minimum is %d" % ( count, name, min))
+               print("found %d %s" % ( count, name ))
+               
+       verify("images"         , 1, len(drv.glance_image_list()))
+       verify("flavors "       , 1, len(drv.nova_flavor_list()))
+       verify("floating ips "  , 1, len(drv.nova_floating_ip_list()))
+       verify("servers"        , 0, len(drv.nova_server_list()))
+       verify("networks"       , 1, len(drv.neutron_network_list()))
+       verify("subnets"        , 1, len(drv.neutron_subnet_list()))
+       verify("ports"          , 1, len(drv.neutron_port_list()))
+       verify("ceilometers"    , 1, len(drv.ceilo_meter_list()))
+       
+
+
+if len(sys.argv) != 6:
+       print("ARGS are admin_user admin_password auth_url tenant_name mgmt_network_name")
+       print("e.g. %s pluto mypasswd http://10.95.4.2:5000/v3 demo private" % __file__ )
+       sys.exit(1)
+
+args=tuple(sys.argv[1:5])
+
+try:
+       v3 = OpenstackDriver(*args)
+except Exception as e:
+       print("v3 failed: %s" % e)
+else:
+       print("v3 endpoint instantiated")
+       test_openstack(v3)
+       print("SUCCESS! v3 is working")
+       sys.exit(0)
+
+
+try:
+       v2 = OpenstackDriver(*args)
+except Exception as e:
+       print("v2 failed: %s" % e)
+else:
+       print("v2 endpoint instantiated")
+       print("SUCCESS! v2 is working")
+       test_openstack(v2)
+
+
+# need to check if any public urls are loopbacks
+# need to check DNS is set up right 
+#    neutron subnet-show private_subnet
+#    host repo.riftio.com  10.64.1.3
+
diff --git a/modules/core/rwvx/rwcal/test/aws_resources.py b/modules/core/rwvx/rwcal/test/aws_resources.py
new file mode 100644 (file)
index 0000000..e0bde5b
--- /dev/null
@@ -0,0 +1,350 @@
+#!/usr/bin/python3
+
+import os
+import sys
+import uuid
+import rw_peas
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import argparse
+import logging
+import rwlogger
+import boto3
+import botocore
+
+persistent_resources = {
+    'vms'      : [],
+    'networks' : [],
+}
+
+MISSION_CONTROL_NAME = 'mission-control'
+LAUNCHPAD_NAME = 'launchpad'
+
+RIFT_IMAGE_AMI = 'ami-7070231a'
+
+logging.basicConfig(level=logging.ERROR)
+logger = logging.getLogger('rift.cal.awsresources')
+logger.setLevel(logging.INFO)
+
+def get_cal_plugin():
+    """
+        Load AWS cal plugin
+    """
+    plugin = rw_peas.PeasPlugin('rwcal_aws', 'RwCal-1.0')
+    engine, info, extension = plugin()
+    cal = plugin.get_interface("Cloud")
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+    try:
+        rc = cal.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except Exception as e:
+        logger.error("ERROR:Cal plugin instantiation failed with exception %s",repr(e))
+    else:
+        logger.info("AWS Cal plugin successfully instantiated")
+        return cal
+
+def get_cal_account(**kwargs):
+    """
+    Returns AWS cal account
+    """
+    account                        = RwcalYang.CloudAccount()
+    account.account_type           = "aws"
+    account.aws.key = kwargs['key']
+    account.aws.secret = kwargs['secret']
+    account.aws.region = kwargs['region']
+    if 'ssh_key' in kwargs and kwargs['ssh_key'] is not None:
+        account.aws.ssh_key = kwargs['ssh_key']
+    account.aws.availability_zone = kwargs['availability_zone']
+    if 'vpcid' in kwargs and kwargs['vpcid'] is not None: 
+        account.aws.vpcid =  kwargs['vpcid']
+    if 'default_subnet_id' in kwargs and kwargs['default_subnet_id'] is not None:
+        account.aws.default_subnet_id = kwargs['default_subnet_id']
+    return account
+
+class AWSResources(object):
+    """
+    Class with methods to manage AWS resources
+    """
+    def __init__(self,**kwargs):
+        self._cal      = get_cal_plugin()
+        self._acct     = get_cal_account(**kwargs)
+
+    def _destroy_vms(self):
+        """
+        Destroy VMs
+        """
+        logger.info("Initiating VM cleanup")
+        rc, rsp = self._cal.get_vdu_list(self._acct)
+        vdu_list = [vm for vm in rsp.vdu_info_list if vm.name not in persistent_resources['vms']]
+        logger.info("Deleting VMs : %s" %([x.name for x in vdu_list]))
+
+        for vdu in vdu_list:
+            self._cal.delete_vdu(self._acct, vdu.vdu_id)
+
+        logger.info("VM cleanup complete")
+
+    def _destroy_networks(self):
+        """
+        Destroy Networks
+        """
+        logger.info("Initiating Network cleanup")
+        driver = self._cal._get_driver(self._acct)
+        subnets = driver.get_subnet_list()
+        subnet_list = [subnet for subnet in subnets if subnet.default_for_az is False]
+
+        logger.info("Deleting Networks : %s" %([x.id for x in subnet_list]))
+        for subnet in subnet_list:
+            self._cal.delete_virtual_link(self._acct, subnet.subnet_id)
+        logger.info("Network cleanup complete")
+
+    def destroy_resource(self):
+        """
+        Destroy resources
+        """
+        logger.info("Cleaning up AWS resources")
+        self._destroy_vms()
+        self._destroy_networks()
+        logger.info("Cleaning up AWS resources.......[Done]")
+
+    def _destroy_mission_control(self):
+        """
+        Destroy Mission Control VM
+        """
+        logger.info("Initiating MC VM cleanup")
+        rc, rsp = self._cal.get_vdu_list(self._acct)
+        vdu_list = [vm for vm in rsp.vdu_info_list if vm.name == MISSION_CONTROL_NAME]
+        logger.info("Deleting VMs : %s" %([x.name for x in vdu_list]))
+
+        for vdu in vdu_list:
+            self._cal.delete_vdu(self._acct, vdu.vdu_id)
+        logger.info("MC VM cleanup complete")
+
+    def _destroy_launchpad(self):
+        """
+        Destroy Launchpad VM
+        """
+        logger.info("Initiating LP VM cleanup")
+        rc, rsp = self._cal.get_vdu_list(self._acct)
+        vdu_list = [vm for vm in rsp.vdu_info_list if vm.name == LAUNCHPAD_NAME]
+        logger.info("Deleting VMs : %s" %([x.name for x in vdu_list]))
+
+        for vdu in vdu_list:
+            self._cal.delete_vdu(self._acct, vdu.vdu_id)
+        logger.info("LP VM cleanup complete")
+        
+
+    def create_mission_control(self):
+        """
+        Create Mission Control VM in AWS
+        """ 
+        logger.info("Creating mission control VM")
+        vdu = RwcalYang.VDUInitParams()
+        vdu.name = MISSION_CONTROL_NAME
+        vdu.image_id = RIFT_IMAGE_AMI
+        vdu.flavor_id = 'c3.large'
+        vdu.allocate_public_address = True
+        vdu.vdu_init.userdata = "#cloud-config\n\nruncmd:\n - echo Sleeping for 5 seconds and attempting to start salt-master\n - sleep 5\n - /bin/systemctl restart salt-master.service\n"
+
+        rc,rs=self._cal.create_vdu(self._acct,vdu)
+        assert rc == RwStatus.SUCCESS
+        self._mc_id = rs
+
+        driver = self._cal._get_driver(self._acct)
+        inst=driver.get_instance(self._mc_id)
+        inst.wait_until_running()
+
+        rc,rs =self._cal.get_vdu(self._acct,self._mc_id)
+        assert rc == RwStatus.SUCCESS
+        self._mc_public_ip = rs.public_ip
+        self._mc_private_ip = rs.management_ip
+        
+        logger.info("Started Mission Control VM with id %s and IP Address %s\n",self._mc_id, self._mc_public_ip)
+
+    def create_launchpad_vm(self, salt_master = None):        
+        """
+        Create Launchpad VM in AWS
+        Arguments
+            salt_master (String): String with Salt master IP typically MC VM private IP
+        """
+        logger.info("Creating launchpad VM")
+        USERDATA_FILENAME = os.path.join(os.environ['RIFT_INSTALL'],
+                                 'etc/userdata-template')
+
+        try:
+            fd = open(USERDATA_FILENAME, 'r')
+        except Exception as e:
+                sys.exit(-1)
+        else:
+            LP_USERDATA_FILE = fd.read()
+            # Run the enable lab script when the openstack vm comes up
+            LP_USERDATA_FILE += "runcmd:\n"
+            LP_USERDATA_FILE += " - echo Sleeping for 5 seconds and attempting to start elastic-network-interface\n"
+            LP_USERDATA_FILE += " - sleep 5\n"
+            LP_USERDATA_FILE += " - /bin/systemctl restart elastic-network-interfaces.service\n"
+
+        if salt_master is None:
+            salt_master=self._mc_private_ip
+        node_id = str(uuid.uuid4())
+
+        vdu = RwcalYang.VDUInitParams()
+        vdu.name = LAUNCHPAD_NAME
+        vdu.image_id = RIFT_IMAGE_AMI
+        vdu.flavor_id = 'c3.xlarge'
+        vdu.allocate_public_address = True
+        vdu.vdu_init.userdata = LP_USERDATA_FILE.format(master_ip = salt_master,
+                                          lxcname = node_id)
+        vdu.node_id = node_id
+
+        rc,rs=self._cal.create_vdu(self._acct,vdu)
+        assert rc == RwStatus.SUCCESS
+        self._lp_id = rs
+
+        driver = self._cal._get_driver(self._acct)
+        inst=driver.get_instance(self._lp_id)
+        inst.wait_until_running()
+
+        rc,rs =self._cal.get_vdu(self._acct,self._lp_id)
+        assert rc == RwStatus.SUCCESS
+
+        self._lp_public_ip = rs.public_ip
+        self._lp_private_ip = rs.management_ip
+        logger.info("Started Launchpad VM with id %s and IP Address %s\n",self._lp_id, self._lp_public_ip)
+         
+    def upload_ssh_key_to_ec2(self):
+        """
+         Upload SSH key to EC2 region
+        """
+        driver = self._cal._get_driver(self._acct)
+        key_name = os.getlogin() + '-' + 'sshkey' 
+        key_path = '%s/.ssh/id_rsa.pub' % (os.environ['HOME'])
+        if os.path.isfile(key_path):
+            logger.info("Uploading ssh public key file in path %s with keypair name %s", key_path,key_name)
+            with open(key_path) as fp:
+                driver.upload_ssh_key(key_name,fp.read())
+        else:
+            logger.error("Valid Public key file %s not found", key_path)
+
+
+def main():
+    """
+    Main routine
+    """
+    parser = argparse.ArgumentParser(description='Script to manage AWS resources')
+
+    parser.add_argument('--aws-key',
+                        action = 'store',
+                        dest = 'aws_key',
+                        type = str,
+                        help='AWS key')
+
+    parser.add_argument('--aws-secret',
+                        action = 'store',
+                        dest = 'aws_secret',
+                        type = str,
+                        help='AWS secret')
+
+    parser.add_argument('--aws-region',
+                        action = 'store',
+                        dest = 'aws_region',
+                        type = str,
+                        help='AWS region')
+
+    parser.add_argument('--aws-az',
+                        action = 'store',
+                        dest = 'aws_az',
+                        type = str,
+                        help='AWS Availability zone')
+
+    parser.add_argument('--aws-sshkey',
+                        action = 'store',
+                        dest = 'aws_sshkey',
+                        type = str,
+                        help='AWS SSH Key to login to instance')
+
+    parser.add_argument('--aws-vpcid',
+                        action = 'store',
+                        dest = 'aws_vpcid',
+                        type = str,
+                        help='AWS VPC ID to use to indicate non default VPC')
+
+    parser.add_argument('--aws-default-subnet',
+                        action = 'store',
+                        dest = 'aws_default_subnet',
+                        type = str,
+                        help='AWS Default subnet id in VPC to be used for mgmt network')
+
+    parser.add_argument('--mission-control',
+                        action = 'store_true',
+                        dest = 'mission_control',
+                        help='Create Mission Control VM')
+
+    parser.add_argument('--launchpad',
+                        action = 'store_true',
+                        dest = 'launchpad',
+                        help='Create LaunchPad VM')
+
+    parser.add_argument('--salt-master',
+                        action = 'store',
+                        dest = 'salt_master',
+                        type = str,
+                        help='IP Address of salt controller. Required, if only launchpad  VM is being created.')
+
+    parser.add_argument('--cleanup',
+                        action = 'store',
+                        dest = 'cleanup',
+                        nargs = '+',
+                        type = str,
+                        help = 'Perform resource cleanup for AWS installation. \n Possible options are {all, mc, lp,  vms, networks }')
+
+    parser.add_argument('--upload-ssh-key',
+                         action = 'store_true',
+                         dest = 'upload_ssh_key',
+                         help = "Upload the user's SSH public key ~/.ssh/id_rsa.pub")
+
+    argument = parser.parse_args()
+
+    if (argument.aws_key is None or argument.aws_secret is None or argument.aws_region is None or
+       argument.aws_az is None):
+        logger.error("Missing mandatory params. AWS Key, Secret, Region and AZ are mandatory params")
+        sys.exit(-1)
+
+    if (argument.cleanup is None and argument.mission_control is None and argument.launchpad is None 
+        and argument.upload_ssh_key is None):
+        logger.error('Insufficient parameters')
+        sys.exit(-1)
+
+    ### Start processing
+    logger.info("Instantiating cloud-abstraction-layer")
+    drv = AWSResources(key=argument.aws_key, secret=argument.aws_secret, region=argument.aws_region, availability_zone = argument.aws_az, 
+                       ssh_key = argument.aws_sshkey, vpcid = argument.aws_vpcid, default_subnet_id = argument.aws_default_subnet)
+    logger.info("Instantiating cloud-abstraction-layer.......[Done]")
+
+    if argument.upload_ssh_key:
+         drv.upload_ssh_key_to_ec2()
+
+    if argument.cleanup is not None:
+        for r_type in argument.cleanup:
+            if r_type == 'all':
+                drv.destroy_resource()
+                break
+            if r_type == 'vms':
+                drv._destroy_vms()
+            if r_type == 'networks':
+                drv._destroy_networks()
+            if r_type == 'mc':
+                drv._destroy_mission_control()
+            if r_type == 'lp':
+                drv._destroy_launchpad()
+
+    if argument.mission_control == True:
+        drv.create_mission_control()
+
+    if argument.launchpad == True:
+        if argument.salt_master is None and argument.mission_control is False:
+            logger.error('Salt Master IP address not provided to start Launchpad.')
+            sys.exit(-2)
+
+        drv.create_launchpad_vm(argument.salt_master)
+
+if __name__ == '__main__':
+    main()
diff --git a/modules/core/rwvx/rwcal/test/cal_module_test/CMakeLists.txt b/modules/core/rwvx/rwcal/test/cal_module_test/CMakeLists.txt
new file mode 100644 (file)
index 0000000..a565b51
--- /dev/null
@@ -0,0 +1,29 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Varun Prasad
+# Creation Date: 21/01/2016
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+install(
+  PROGRAMS
+    cal_module_test
+  DESTINATION usr/rift/systemtest/cal_module_test
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    pytest/conftest.py
+    pytest/cal_module_test.py
+  DESTINATION usr/rift/systemtest/cal_module_test/pytest
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    racfg/cal_module_test.racfg
+  DESTINATION usr/rift/systemtest/cal_module_test
+  COMPONENT ${PKG_LONG_NAME})
+
diff --git a/modules/core/rwvx/rwcal/test/cal_module_test/cal_module_test b/modules/core/rwvx/rwcal/test/cal_module_test/cal_module_test
new file mode 100755 (executable)
index 0000000..1485867
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/bash
+
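+# Run the CAL module pytest suite from the installed system-test area and
+# write JUnit XML results into $RIFT_MODULE_TEST.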
+SYS_TEST=$RIFT_INSTALL/usr/rift/systemtest/
+PYTEST_DIR=$SYS_TEST/cal_module_test/pytest
+
+TEST_NAME="TC_CAL_MODULE_TEST"
+RESULT_XML="cal_module_test.xml"
+
+test_args="\
+    --junitprefix ${TEST_NAME} \
+    --junitxml ${RIFT_MODULE_TEST}/${RESULT_XML}"
+
+TEST_CMD="py.test -vvv -s ${PYTEST_DIR}/cal_module_test.py ${test_args} "
+
+$TEST_CMD
diff --git a/modules/core/rwvx/rwcal/test/cal_module_test/pytest/cal_module_test.py b/modules/core/rwvx/rwcal/test/cal_module_test/pytest/cal_module_test.py
new file mode 100644 (file)
index 0000000..d544aee
--- /dev/null
@@ -0,0 +1,468 @@
+"""
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+@file cal_test.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 22-Jan-2016
+
+"""
+
+import logging
+import os
+import time
+import uuid
+
+import pytest
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import rw_peas
+import rwlogger
+
+logger = logging.getLogger('rwcal')
+logging.basicConfig(level=logging.INFO)
+
+
+class CloudConfig(object):
+    def __init__(self, cal, account):
+        self.cal = cal
+        self.account = account
+
+    def check_state(self, object_id, object_api, expected_state, state_attr_name="state"):
+        """For a given object (Vm, port etc) checks if the object has
+        reached the expected state.
+        """
+        get_object = getattr(self.cal, object_api)
+        for i in range(100):  # 100 poll iterations...
+            rc, rs = get_object(self.account, object_id)
+
+            curr_state = getattr(rs, state_attr_name)
+            print (curr_state)
+            if curr_state == expected_state:
+                break
+            else:
+                time.sleep(2)
+
+        rc, rs = get_object(self.account, object_id)
+        assert rc == RwStatus.SUCCESS
+        assert getattr(rs, state_attr_name) == expected_state
+
+
+class Aws(CloudConfig):
+    def __init__(self, option):
+        """
+        Args:
+            option (OptionParser): OptionParser instance.
+        """
+        self.image_id = 'ami-7070231a'
+        self.virtual_link_id = None
+        self.flavor_id = None
+        self.vdu_id = None
+
+        super().__init__(self._cal(), self._account(option))
+
+    def _cal(self):
+        """
+        Loads rw.cal plugin via libpeas
+        """
+        plugin = rw_peas.PeasPlugin('rwcal_aws', 'RwCal-1.0')
+
+        engine, info, extension = plugin()
+
+        # Get the RwLogger context
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+        cal = plugin.get_interface("Cloud")
+        try:
+            rc = cal.init(rwloggerctx)
+            assert rc == RwStatus.SUCCESS
+        except Exception:
+            logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+        else:
+            logger.info("AWS Cal plugin successfully instantiated")
+        return cal
+
+    def _account(self, option):
+        """
+        Args:
+            option (OptionParser): OptionParser instance.
+
+        Return:
+            CloudAccount details
+        """
+        account = RwcalYang.CloudAccount.from_dict({
+                "account_type": "aws",
+                "aws": {
+                    "key": option.aws_user,
+                    "secret": option.aws_password,
+                    "region": option.aws_region,
+                    "availability_zone": option.aws_zone,
+                    "ssh_key": option.aws_ssh_key
+                }
+            })
+
+        return account
+
+    def flavor(self):
+        """
+        Returns:
+            FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem.from_dict({
+                    "name": str(uuid.uuid4()),
+                    "vm_flavor": {
+                        "memory_mb": 1024,
+                        "vcpu_count": 1,
+                        "storage_gb": 0
+                    }
+            })
+
+        return flavor
+
+    def vdu(self):
+        """Provide AWS specific VDU config.
+
+        Returns:
+            VDUInitParams
+        """
+        vdu = RwcalYang.VDUInitParams.from_dict({
+                "name": str(uuid.uuid4()),
+                "node_id": "123456789012345",
+                "image_id": self.image_id,
+                "flavor_id": "t2.micro"
+            })
+
+        c1 = vdu.connection_points.add()
+        c1.name = str(uuid.uuid4())
+        c1.virtual_link_id = self.virtual_link_id
+
+        return vdu
+
+    def image(self):
+        raise NotImplementedError("Image create APIs are not implemented for AWS")
+
+    def virtual_link(self):
+        """Provide Vlink config
+
+        Returns:
+            VirtualLinkReqParams
+        """
+        vlink = RwcalYang.VirtualLinkReqParams.from_dict({
+                    "name": str(uuid.uuid4()),
+                    "subnet": '172.31.64.0/20',
+            })
+
+        return vlink
+
+
+class Openstack(CloudConfig):
+    def __init__(self, option):
+        """
+        Args:
+            option (OptionParser)
+        """
+        self.image_id = None
+        self.virtual_link_id = None
+        self.flavor_id = None
+        self.vdu_id = None
+
+        super().__init__(self._cal(), self._account(option))
+
+    def _cal(self):
+        """
+        Loads rw.cal plugin via libpeas
+        """
+        plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+        engine, info, extension = plugin()
+
+        # Get the RwLogger context
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+        cal = plugin.get_interface("Cloud")
+        try:
+            rc = cal.init(rwloggerctx)
+            assert rc == RwStatus.SUCCESS
+        except Exception:
+            logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+        else:
+            logger.info("Openstack Cal plugin successfully instantiated")
+        return cal
+
+    def _account(self, option):
+        """Cloud account information for Account
+
+        Returns:
+            CloudAccount
+        """
+        acct = RwcalYang.CloudAccount.from_dict({
+            "account_type": "openstack",
+            "openstack": {
+                    "key": option.os_user,
+                    "secret": option.os_password,
+                    "auth_url": 'http://{}:5000/v3/'.format(option.os_host),
+                    "tenant": option.os_tenant,
+                    "mgmt_network": option.os_network
+                }
+            })
+
+        return acct
+
+    def image(self):
+        """Provides Image config for openstack.
+
+        Returns:
+            ImageInfoItem
+        """
+        image = RwcalYang.ImageInfoItem.from_dict({
+                "name": str(uuid.uuid4()),
+                "location": os.path.join(os.getenv("RIFT_ROOT"), "images/rift-root-latest.qcow2"),
+                "disk_format": "qcow2",
+                "container_format": "bare",
+                "checksum": "12312313123131313131"
+            })
+        return image
+
+    def flavor(self):
+        """Flavor config for openstack
+
+        Returns:
+            FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem.from_dict({
+                "name": str(uuid.uuid4()),
+                "vm_flavor": {
+                        "memory_mb": 16392,
+                        "vcpu_count": 4,
+                        "storage_gb": 40
+                },
+                "guest_epa": {
+                        "cpu_pinning_policy": "DEDICATED",
+                        "cpu_thread_pinning_policy": "SEPARATE",
+                }})
+
+        numa_node_count = 2
+        flavor.guest_epa.numa_node_policy.node_cnt = numa_node_count
+        for i in range(numa_node_count):
+            node = flavor.guest_epa.numa_node_policy.node.add()
+            node.id = i
+            if i == 0:
+                node.vcpu = [0, 1]
+            elif i == 1:
+                node.vcpu = [2, 3]
+            node.memory_mb = 8196
+
+        dev = flavor.guest_epa.pcie_device.add()
+        dev.device_id = "PCI_10G_ALIAS"
+        dev.count = 1
+
+        return flavor
+
+    def vdu(self):
+        """Returns VDU config
+
+        Returns:
+            VDUInitParams
+        """
+        vdu = RwcalYang.VDUInitParams.from_dict({
+                "name": str(uuid.uuid4()),
+                "node_id": "123456789012345",
+                "image_id": self.image_id,
+                "flavor_id": self.flavor_id,
+            })
+
+        c1 = vdu.connection_points.add()
+        c1.name = str(uuid.uuid4())
+        c1.virtual_link_id = self.virtual_link_id
+
+        return vdu
+
+    def virtual_link(self):
+        """vlink config for Openstack
+
+        Returns:
+            VirtualLinkReqParams
+        """
+        vlink = RwcalYang.VirtualLinkReqParams.from_dict({
+                    "name": str(uuid.uuid4()),
+                    "subnet": '192.168.1.0/24',
+            })
+
+        return vlink
+
+
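+# Only the Openstack configuration is exercised by default; add Aws to the
+# params list below to run the same tests against an AWS account.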
+@pytest.fixture(scope="module", params=[Openstack], ids=lambda val: val.__name__)
+def cloud_config(request):
+    return request.param(request.config.option)
+
+
+@pytest.mark.incremental
+class TestCalSetup:
+    def test_flavor_apis(self, cloud_config):
+        """
+        Asserts:
+            1. If the new flavor is created and available via read APIs
+            2. Verifies the READ APIs
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status, new_flavor_id = cal.create_flavor(account, cloud_config.flavor())
+        cloud_config.flavor_id = new_flavor_id
+        assert status == RwStatus.SUCCESS
+
+        status, flavors = cal.get_flavor_list(account)
+        assert status == RwStatus.SUCCESS
+
+        ids = []
+        for flavor in flavors.flavorinfo_list:
+            status, flavor_single = cal.get_flavor(account, flavor.id)
+            assert status == RwStatus.SUCCESS
+            assert flavor.id == flavor_single.id
+            ids.append(flavor.id)
+
+        assert new_flavor_id in ids
+
+    def test_image_apis(self, cloud_config):
+        """
+        Asserts:
+            1. If the new image is created and available via read APIs
+            2. Verifies the READ APIs
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        if type(cloud_config) is Openstack:
+            status, new_image_id = cal.create_image(account, cloud_config.image())
+            cloud_config.image_id = new_image_id
+            assert status == RwStatus.SUCCESS
+            cloud_config.check_state(new_image_id, "get_image", "active")
+        else:
+            # Hack!
+            new_image_id = "ami-7070231a"
+
+        status, images = cal.get_image_list(account)
+
+        ids = []
+        for image in images.imageinfo_list:
+            status, image_single = cal.get_image(account, image.id)
+            assert status == RwStatus.SUCCESS
+            assert image_single.id == image.id
+            ids.append(image.id)
+
+        assert new_image_id in ids
+
+    def test_virtual_link_create(self, cloud_config):
+        """
+        Asserts:
+            1. If the new Vlink is created and available via read APIs
+            2. Verifies the READ APIs
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status, new_vlink_id = cal.create_virtual_link(account, cloud_config.virtual_link())
+        cloud_config.virtual_link_id = new_vlink_id
+        assert status == RwStatus.SUCCESS
+        cloud_config.check_state(new_vlink_id, "get_virtual_link", "active")
+
+        status, vlinks = cal.get_virtual_link_list(account)
+        assert status == RwStatus.SUCCESS
+
+        ids = []
+        for vlink in vlinks.virtual_link_info_list:
+            status, vlink_single = cal.get_virtual_link(account, vlink.virtual_link_id)
+            assert status == RwStatus.SUCCESS
+            assert vlink_single.virtual_link_id == vlink.virtual_link_id
+            ids.append(vlink.virtual_link_id)
+
+        assert new_vlink_id in ids
+
+    def test_vdu_apis(self, cloud_config):
+        """
+        Asserts:
+            1. If the new VDU is created and available via read APIs
+            2. Verifies the READ APIs
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status, new_vdu_id = cal.create_vdu(account, cloud_config.vdu())
+        cloud_config.vdu_id = new_vdu_id
+        assert status == RwStatus.SUCCESS
+        cloud_config.check_state(new_vdu_id, "get_vdu", "active")
+
+        status, vdus = cal.get_vdu_list(account)
+        assert status == RwStatus.SUCCESS
+
+        ids = []
+        for vdu in vdus.vdu_info_list:
+            status, vdu_single = cal.get_vdu(account, vdu.vdu_id)
+            assert status == RwStatus.SUCCESS
+            assert vdu_single.vdu_id == vdu.vdu_id
+            ids.append(vdu.vdu_id)
+
+        assert new_vdu_id in ids
+
+    def test_modify_vdu_api(self, cloud_config):
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        vdu_modify = RwcalYang.VDUModifyParams()
+        vdu_modify.vdu_id = cloud_config.vdu_id
+        c1 = vdu_modify.connection_points_add.add()
+        c1.name = "c_modify1"
+        # Set the new vlink
+        c1.virtual_link_id = cloud_config.virtual_link_id
+
+        status = cal.modify_vdu(account, vdu_modify)
+        assert status == RwStatus.SUCCESS
+
+@pytest.mark.incremental
+class TestCalTeardown:
+    def test_flavor_delete(self, cloud_config):
+        """
+        Asserts:
+            1. If flavor is deleted
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        if type(cloud_config) != Aws:
+            status = cal.delete_flavor(account, cloud_config.flavor_id)
+            assert status == RwStatus.SUCCESS
+
+    def test_image_delete(self, cloud_config):
+        """
+        Asserts:
+            1. If image is deleted
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        if type(cloud_config) != Aws:
+            status = cal.delete_image(account, cloud_config.image_id)
+            assert status == RwStatus.SUCCESS
+
+    def test_virtual_link_delete(self, cloud_config):
+        """
+        Asserts:
+            1. If VLink is deleted
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status = cal.delete_virtual_link(account, cloud_config.virtual_link_id)
+        assert status == RwStatus.SUCCESS
+
+    def test_delete_vdu(self, cloud_config):
+        """
+        Asserts:
+            1. If VDU is deleted
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status = cal.delete_vdu(account, cloud_config.vdu_id)
+        assert status == RwStatus.SUCCESS
diff --git a/modules/core/rwvx/rwcal/test/cal_module_test/pytest/conftest.py b/modules/core/rwvx/rwcal/test/cal_module_test/pytest/conftest.py
new file mode 100644 (file)
index 0000000..7e10444
--- /dev/null
@@ -0,0 +1,25 @@
+"""
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+@file conftest.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 21/01/2016
+
+"""
+
+def pytest_addoption(parser):
+    # Openstack related options
+    parser.addoption("--os-host", action="store", default="10.66.4.15")
+    parser.addoption("--os-user", action="store", default="pluto")
+    parser.addoption("--os-password", action="store", default="mypasswd")
+    parser.addoption("--os-tenant", action="store", default="demo")
+    parser.addoption("--os-network", action="store", default="private")
+
+    # aws related options
+    parser.addoption("--aws-user", action="store", default="AKIAIKRDX7BDLFU37PDA")
+    parser.addoption("--aws-password", action="store", default="cjCRtJxVylVkbYvOUQeyvCuOWAHieU6gqcQw29Hw")
+    parser.addoption("--aws-region", action="store", default="us-east-1")
+    parser.addoption("--aws-zone", action="store", default="us-east-1c")
+    parser.addoption("--aws-ssh-key", action="store", default="vprasad-sshkey")
diff --git a/modules/core/rwvx/rwcal/test/cal_module_test/racfg/cal_module_test.racfg b/modules/core/rwvx/rwcal/test/cal_module_test/racfg/cal_module_test.racfg
new file mode 100644 (file)
index 0000000..15bcc63
--- /dev/null
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_CAL_MODULE_TESTS",
+  "commandline":"./cal_module_test",
+  "target_vm":"VM",
+  "test_description":"System test targeting module tests for CAL accounts",
+  "run_as_root": true,
+  "status":"working",
+  "keywords":["nightly","smoke","smoke_stable","MANO","cloudsim"],
+  "timelimit": 2400,
+  "networks":[],
+  "vms":[
+    {
+      "name": "VM",
+      "memory": 8192,
+      "cpus": 4
+    }
+  ]
+}
+
diff --git a/modules/core/rwvx/rwcal/test/cloudtool_cal.py b/modules/core/rwvx/rwcal/test/cloudtool_cal.py
new file mode 100755 (executable)
index 0000000..89b072d
--- /dev/null
@@ -0,0 +1,977 @@
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# 
+
+import os,sys,platform
+import socket
+import time
+import re
+import logging
+
+from pprint import pprint
+import argparse
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import rw_peas
+import rwlogger
+
+global nova
+nova = None
+
+def wait_till_active(driver, account, vm_id_list, timeout):                                                                                                              
+    """
+    Wait until VM reaches ACTIVE state. 
+    """
+    # Wait while VM goes to required state
+
+    start = time.time()
+    end = time.time() + timeout
+    done = False;
+
+    while ( time.time() < end ) and not done:
+       done = True      
+       for vm_id in vm_id_list:
+           rc, rs = driver.get_vm(account, vm_id)
+           assert rc == RwStatus.SUCCESS
+           if rs.state != 'ACTIVE':
+               done = False               
+               time.sleep(2)
+
+
+def get_image_name(node):
+    images = driver.list_images()
+    for i in images:
+        if i.id == node.extra['imageId']:
+            return i.name
+    return None
+
+def get_flavor_name(flavorid):
+    global nova
+    if nova is None:
+        nova = ra_nova_connect(project='admin')
+    for f in nova.flavors.list(True):
+         if f.id == flavorid: 
+             return f.name
+    return None
+
+def hostname():
+    return socket.gethostname().split('.')[0]
+
+def vm_register(id, driver, account, cmdargs, header=True):
+    if testbed is None:
+        print("Cannot register VM without reservation system")
+        return False
+
+    if cmdargs.reserve_new_vms:
+        user=os.environ['USER']
+    else:
+        user=None
+    fmt="%-28s %-12s %-12s %-15s"
+    if header:
+        print('VM                           controller   compute      mgmt ip')
+        print('---------------------------- ------------ ------------ ---------------')
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS
+    for node in nodes.vminfo_list:
+        if id == 'all' or node.vm_id == id:
+            rc, flavor = driver.get_flavor(account, node.flavor_id)
+            assert rc == RwStatus.SUCCESS
+            ip = node.management_ip
+
+            huge = 'DISABLED'
+            if flavor.guest_epa.mempage_size == 'LARGE':
+                huge = flavor.guest_epa.mempage_size
+            #compute = utils.find_resource(nova.servers, node.id)
+            #compute_name = compute._info['OS-EXT-SRV-ATTR:hypervisor_hostname'].split('.')[0]
+            compute_name = hostname()      
+            try:
+                testbed.add_resource(node.vm_name, hostname(), ip, flavor.vm_flavor.memory_mb, flavor.vm_flavor.vcpu_count, user, flavor.name, compute=compute_name, huge_pages=huge )
+                print(fmt % ( node.vm_name, hostname(), compute_name, ip )) 
+            except Exception as e:
+                print("WARNING: Error \"%s\" adding resource to reservation system" % e)
+
+class OFromDict(object):
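+  """Expose the keys of a dict as attributes of an object."""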
+  def __init__(self, d):
+    self.__dict__ = d
+
+
+def vm_create_subcommand(driver, account, cmdargs):
+    """Process the VM create subcommand."""
+    if cmdargs.name and cmdargs.count != 1:
+        sys.exit("Error: when VM name is specified, the count must be 1")
+
+    rc, sizes = driver.get_flavor_list(account)
+    assert rc == RwStatus.SUCCESS
+
+    try:
+        size = [s for s in sizes.flavorinfo_list if s.name == cmdargs.flavor][0]
+    except IndexError:
+        sys.exit("Error: Failed to create VM, couldn't find flavor %s" % \
+                 cmdargs.flavor)
+    print(size)
+    rc, images = driver.get_image_list(account)
+    assert rc == RwStatus.SUCCESS
+    if images is None:
+       sys.exit("Error: No images found")
+    try:
+        image = [i for i in images.imageinfo_list if cmdargs.image in i.name][0]
+    except IndexError:
+        sys.exit("Error: Failed to create VM, couldn't find image %s" % \
+                 cmdargs.image)
+    print(image)
+
+    # VM name is not specified, so determine a unique VM name
+    # VM name should have the following format:
+    #     rwopenstack_<host>_vm<id>, e.g., rwopenstack_grunt16_vm1
+    # The following code gets the list of existing VMs and determines
+    # a unique id for the VM name construction.
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS
+    prefix = 'rwopenstack_%s_vm' % hostname()
+    vmid = 0
+    for n in nodes.vminfo_list:
+        if n.vm_name.startswith(prefix):
+            temp_str = n.vm_name[len(prefix):]
+            if temp_str == '':
+                temp = 1
+            else:
+                temp = int(n.vm_name[len(prefix):])
+
+            if (temp > vmid):
+                vmid = temp
+
+    nodelist = []
+    for i in range(0, cmdargs.count):
+            if cmdargs.name:
+                vm_name = cmdargs.name
+            else:
+                vm_name = '%s%d' % (prefix, vmid+i+1)
+            rc, netlist = driver.get_network_list(account)
+            assert rc == RwStatus.SUCCESS      
+            for network in netlist.networkinfo_list:
+                 print(network)    
+
+            vm = RwcalYang.VMInfoItem()
+            vm.vm_name = vm_name
+            vm.flavor_id = size.id
+            vm.image_id  = image.id
+            vm.cloud_init.userdata = ''
+
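+            # Collect every non-public network by name so the requested --networks list can be validated below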
+            nets = dict()
+            for network in netlist.networkinfo_list:
+                if network.network_name != "public":
+                    nwitem = RwcalYang.VMInfoItem_NetworkList()                        
+                    nwitem.network_id = network.network_id                 
+                    nets[network.network_name] = nwitem
+                     
+            logger.debug('creating VM using nets %s' % cmdargs.networks )
+            for net in cmdargs.networks.split(','):
+                if not net in nets:
+                    print(("Invalid network name '%s'" % net))
+                    print(('available nets are %s' % ','.join(list(nets.keys())) ))
+                    sys.exit(1)
+                if net != cmdargs.mgmt_network:
+                    vm.network_list.append(nets[net])
+
+            print(vm.network_list)
+            rc, node_id = driver.create_vm(account, vm) 
+
+            # wait for 1 to be up before starting the rest
+            # this is an attempt to make sure the image is cached
+            nodelist.append(node_id)
+            if i == 0 or cmdargs.wait_after_create is True:
+                #wait_until_running([node], timeout=300)
+                wait_till_active(driver, account, nodelist, timeout=300)               
+            print(node_id)
+    if cmdargs.reservation_server_url is not None:
+            if not cmdargs.wait_after_create:
+                print("Waiting for VMs to start")
+                wait_till_active(driver, account, nodelist, timeout=300)               
+                print("VMs are up")
+            header=True
+            for node in nodelist:
+                vm_register(node, driver, account, cmdargs, header)
+                header=False
+                
+
+def vm_destroy_subcommand(driver, account, cmdargs):
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS      
+    ct = len(nodes.vminfo_list)
+    if cmdargs.destroy_all or cmdargs.wait:
+        rc=0
+        for n in nodes.vminfo_list:
+            if testbed is not None:
+                try:
+                    testbed.remove_resource(n.vm_name)
+                except:
+                    print("WARNING: error deleting resource from reservation system")
+            if RwStatus.SUCCESS != driver.delete_vm(account, n.vm_id):
+                print('Error: failed to destroy node %s' % n.vm_name)
+                rc=1
+        if rc:
+            sys.exit(1)
+        if cmdargs.wait:
+            while ct > 0:
+                sys.stderr.write("waiting for %d VMs to exit...\n" % ct)
+                time.sleep(1)
+                try:
+                    rc, nodesnw = driver.get_vm_list(account)
+                    assert rc == RwStatus.SUCCESS      
+                    ct = len(nodesnw.vminfo_list )
+                except:
+                    pass
+        
+    else:
+        vm_re = re.compile('^%s$' % cmdargs.vm_name)
+        ct = 0
+        for n in nodes.vminfo_list:
+            if vm_re.match(n.vm_name):
+                ct += 1
+                if testbed is not None:
+                    try:
+                        testbed.remove_resource(n.vm_name)
+                    except:
+                        print("WARNING: error deleting resource from reservation system")
+                if RwStatus.SUCCESS != driver.delete_vm(account, n.vm_id):
+                    print('Error: failed to destroy node %s' % n.vm_name)
+                    return
+                print('destroyed %s' % n.vm_name)
+        if ct == 0:
+            print("No VMs matching \"%s\" found" % ( cmdargs.vm_name ))
+        
+                    
+def vm_rebuild_subcommand(driver, account, cmdargs):
+    images = driver.list_images()
+    found=0
+    for i in images:
+        if i.name == cmdargs.image_name:
+            found=1
+            break
+    if found != 1:
+        print('Error: Rebuild failed - image %s not found' % cmdargs.image_name)
+        sys.exit(1)
+    image=i
+    nodes = driver.list_nodes()
+    if cmdargs.rebuild_all:
+        rc=0
+        for n in nodes:
+            if not driver.ex_rebuild(n,image):
+                print('Error: failed to rebuild node %s' % n.name)
+                rc=1
+            if rc:
+               sys.exit(1)
+            rebuilt=0
+            while rebuilt != 1:
+                time.sleep(10)
+                nw_nodes = driver.list_nodes()
+                for nw in nw_nodes:
+                    if nw.name == n.name:
+                        if nw.state == n.state:
+                            rebuilt=1
+                        break  
+    else:
+        vm_re = re.compile('^%s$' % cmdargs.vm_name)
+        ct = 0
+        for n in nodes:
+            if vm_re.match(n.name):
+                ct += 1
+                if not driver.ex_rebuild(n,image):
+                    print('Error: failed to rebuild node %s' % n.name)
+                    return
+                print('Rebuilt %s' % n.name)
+                rebuilt=0
+                while rebuilt != 1:
+                    time.sleep(10)
+                    nw_nodes = driver.list_nodes()
+                    for nw in nw_nodes:
+                        if nw.name == n.name:
+                            if nw.state == n.state:
+                                rebuilt=1
+                            break  
+        if ct == 0:
+            print("No VMs matching \"%s\" found" % ( cmdargs.vm_name ))
+        
+                    
+
+def vm_reboot_subcommand(driver, account, cmdargs):
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS      
+    if cmdargs.reboot_all:
+        for n in nodes.vminfo_list:
+            '''
+            if not n.reboot():
+                print 'Error: failed to reboot node %s' % n.name
+            else:
+                print "rebooted %s" % n.name
+            '''
+            time.sleep(cmdargs.sleep_time)
+    else:
+        for n in nodes.vminfo_list:
+            if n.vm_name == cmdargs.vm_name:
+                if RwStatus.SUCCESS !=  driver.reboot_vm(account,n.vm_id):
+                    print('Error: failed to reboot node %s' % n.vm_name)
+                else:
+                    print("rebooted %s" % n.vm_name)
+                    
+
+def vm_start_subcommand(driver, account, cmdargs):
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS      
+    if cmdargs.start_all:
+        for n in nodes.vminfo_list:
+            print(dir(n))
+            if RwStatus.SUCCESS != driver.start_vm(account, n.vm_id):
+                print('Error: failed to start node %s' % n.vm_name)
+            else:
+                print("started %s" % n.vm_name)
+    else:
+        for n in nodes.vminfo_list:
+            if n.vm_name == cmdargs.vm_name:
+                if RwStatus.SUCCESS != driver.start_vm(account, n.vm_id):
+                    print('Error: failed to start node %s' % n.vm_name)
+                else:
+                    print("started %s" % n.vm_name)
+                    
+def vm_subcommand(driver, account, cmdargs):
+    """Process the vm subcommand"""
+
+    if cmdargs.which == 'list':
+        rc, nodes = driver.get_vm_list(account)
+        assert rc == RwStatus.SUCCESS  
+        for n in nodes.vminfo_list:
+            print(n)           
+            if n.state == 4:
+                if not cmdargs.ipsonly:
+                    print("%s is shutoff" % n.vm_name)
+            elif cmdargs.ipsonly:
+                i = n.management_ip
+                if i is not None:
+                    print(i)
+            else: 
+                if n.management_ip is not None:
+                    if len(n.private_ip_list) > 0:
+                        print("%s %s,%s" % (n.vm_name, n.management_ip, ",".join([i.get_ip_address() for i in n.private_ip_list])))
+                    else:
+                        print("%s %s" % (n.vm_name, n.management_ip))
+                else:
+                    print("%s NO IP" % n.vm_name)
+
+    elif cmdargs.which == 'create':
+        vm_create_subcommand(driver, account, cmdargs)
+
+    elif cmdargs.which == 'reboot':
+        vm_reboot_subcommand(driver, account, cmdargs)
+    elif cmdargs.which == 'start':
+        vm_start_subcommand(driver, account, cmdargs)
+    elif cmdargs.which == 'destroy':
+        vm_destroy_subcommand(driver, account, cmdargs)
+    #elif cmdargs.which == 'rebuild':
+    #    vm_rebuild_subcommand(driver, account, cmdargs)
+
+def image_delete_subcommand(driver, account, cmdargs):
+    rc,images = driver.get_image_list(account)
+    assert rc == RwStatus.SUCCESS
+    account.openstack.key          = 'admin'
+    if cmdargs.delete_all:
+        for i in images.imageinfo_list:
+            if RwStatus.SUCCESS != driver.delete_image(account, i.id):
+                print('Error: failed to delete image %s' % i.name)
+    else:
+        for i in images.imageinfo_list:
+            if i.name == cmdargs.image_name:
+                if RwStatus.SUCCESS != driver.delete_image(account, i.id):
+                    print('Error: failed to delete image %s' % i.name)
+
+def image_subcommand(driver, account, cmdargs):
+    """Process the image subcommand"""
+    if cmdargs.which == 'list':
+        rc, images = driver.get_image_list(account)
+        assert rc == RwStatus.SUCCESS
+
+        for i in images.imageinfo_list:
+            print(i)
+
+    elif cmdargs.which == 'delete':
+        image_delete_subcommand(driver, account, cmdargs)
+
+    elif cmdargs.which == 'create':
+        account.openstack.key          = 'admin'
+        rc, images = driver.get_image_list(account)
+        assert rc == RwStatus.SUCCESS
+        for i in images.imageinfo_list:
+            if i.name == cmdargs.image_name:
+                print("FATAL: image \"%s\" already exists" % cmdargs.image_name)
+                return 1
+        
+        print("creating image \"%s\" using %s ..." % \
+              (cmdargs.image_name, cmdargs.file_name))
+        img = RwcalYang.ImageInfoItem()
+        img.name = cmdargs.image_name
+        img.location = cmdargs.file_name
+        img.disk_format = "qcow2"
+        img.container_format = "bare"
+        rc, img_id = driver.create_image(account, img) 
+        print("... done. image_id is %s" % img_id)
+        return img_id
+
+    elif cmdargs.which == 'getid':
+        rc, images = driver.get_image_list(account)
+        assert rc == RwStatus.SUCCESS
+        found=0
+        for i in images.imageinfo_list:
+            if i.name == cmdargs.image_name:
+                print(i.id)
+                found += 1
+        if found != 1:
+            sys.exit(1)
+        
+def flavor_subcommand(driver, account, cmdargs):
+    """Process the flavor subcommand"""
+    if cmdargs.which == 'list':
+        rc, sizes = driver.get_flavor_list(account)
+        assert rc == RwStatus.SUCCESS
+        for f in sizes.flavorinfo_list:
+            rc, flv = driver.get_flavor(account, f.id)     
+            print(flv)     
+    elif cmdargs.which == 'create':
+        account.openstack.key          = 'admin'    
+        flavor                                     = RwcalYang.FlavorInfoItem()
+        flavor.name                                = cmdargs.flavor_name
+        flavor.vm_flavor.memory_mb                 = cmdargs.memory_size
+        flavor.vm_flavor.vcpu_count                = cmdargs.vcpu_count
+        flavor.vm_flavor.storage_gb                = cmdargs.disc_size
+        if cmdargs.hugepages_kilo:
+            flavor.guest_epa.mempage_size              = cmdargs.hugepages_kilo
+        if cmdargs.numa_nodes:
+            flavor.guest_epa.numa_node_policy.node_cnt = cmdargs.numa_nodes
+        if cmdargs.dedicated_cpu:
+            flavor.guest_epa.cpu_pinning_policy        = 'DEDICATED'
+        if cmdargs.pci_count:
+            dev = flavor.guest_epa.pcie_device.add()
+            dev.device_id = 'PCI_%dG_ALIAS' % (cmdargs.pci_speed)
+            dev.count = cmdargs.pci_count 
+        if cmdargs.colleto:
+            dev = flavor.guest_epa.pcie_device.add()
+            dev.device_id = 'COLETO_VF_ALIAS'
+            dev.count = cmdargs.colleto 
+        if cmdargs.trusted_host:
+            flavor.guest_epa.trusted_execution = True 
+
+        rc, flavor_id = driver.create_flavor(account, flavor)
+        assert rc == RwStatus.SUCCESS
+
+        print("created flavor %s id %s" % (cmdargs.flavor_name, flavor_id)) 
+
+    elif cmdargs.which == 'delete':
+        account.openstack.key          = 'admin'    
+        rc, sizes = driver.get_flavor_list(account)
+        assert rc == RwStatus.SUCCESS
+        for f in sizes.flavorinfo_list:
+            if f.name == cmdargs.flavor_name:
+                rc = driver.delete_flavor(account, f.id)
+                assert rc == RwStatus.SUCCESS
+
+def hostagg_subcommand(driver, account, cmdargs):
+    """Process the hostagg subcommand"""
+    if cmdargs.which == 'list':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            print("%-12s %-12s" % \
+                  (f.name, f.availability_zone))
+                
+    elif cmdargs.which == 'create':
+        nova = ra_nova_connect(project='admin')
+        hostagg = nova.aggregates.create(cmdargs.hostagg_name, 
+                                     cmdargs.avail_zone)
+        print("created hostagg %s in %s" % (hostagg.name, hostagg.availability_zone)) 
+
+    elif cmdargs.which == 'delete':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            if f.name == cmdargs.hostagg_name:
+                if cmdargs.force_delete_hosts:
+                    for h in f.hosts:
+                        f.remove_host(h)
+
+                f.delete()
+
+    elif cmdargs.which == 'addhost':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            if f.name == cmdargs.hostagg_name:
+                f.add_host(cmdargs.host_name)
+
+    elif cmdargs.which == 'delhost':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            if f.name == cmdargs.hostagg_name:
+                f.remove_host(cmdargs.host_name)
+
+    elif cmdargs.which == 'setmetadata':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            if f.name == cmdargs.hostagg_name:
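+                # --meta-data arrives as a single "key=value" string; build a one-entry dict for set_metadata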
+                d = dict([cmdargs.extra_specs.split("="),])                
+                f.set_metadata(d)
+
+def quota_subcommand(driver, account, cmdargs):
+    """Process the quota subcommand"""
+    nova = ra_nova_connect(project='admin')
+    cfgfile = get_openstack_file(None,  cmdargs.project)
+    kwargs = load_params(cfgfile)
+
+    keystone = keystone_client.Client(username=kwargs.get('OS_USERNAME'),
+                               password=kwargs.get('OS_PASSWORD'),
+                               tenant_name=kwargs.get('OS_TENANT_NAME'),
+                               auth_url=kwargs.get('OS_AUTH_URL'))
+    if cmdargs.which == 'set':
+        nova.quotas.update(keystone.tenant_id, 
+                           ram=cmdargs.memory, 
+                           floating_ips=cmdargs.ips, 
+                           instances=cmdargs.vms, 
+                           cores=cmdargs.vcpus)
+    elif cmdargs.which == 'get':
+        print("get quotas for tenant %s %s" % \
+              (cmdargs.project, keystone.tenant_id))
+        q = nova.quotas.get(keystone.tenant_id)
+        for att in [ 'ram', 'floating_ips', 'instances', 'cores' ]: 
+            print("%12s: %6d" % ( att, getattr(q, att) ))
+        
+def rules_subcommand(driver, account, cmdargs):
+    nova = ra_nova_connect(project='demo')
+    group=nova.security_groups.find(name='default')
+    if cmdargs.which == 'set':
+        try:
+            nova.security_group_rules.create(group.id,ip_protocol='tcp', from_port=1, to_port=65535 )
+        except BadRequest:
+            pass
+        try: 
+            nova.security_group_rules.create(group.id, ip_protocol='icmp',from_port=-1, to_port=-1 )
+        except BadRequest:
+            pass
+            
+    elif cmdargs.which == 'list':
+        for r in group.rules:
+            if r['from_port'] == -1:
+                print("rule %d proto %s from IP %s" % ( r['id'], r['ip_protocol'], r['ip_range']['cidr'] ))
+            else:
+                print("rule %d proto %s from port %d to %d from IP %s" % ( r['id'], r['ip_protocol'], r['from_port'], r['to_port'], r['ip_range']['cidr'] ))
+
+
+def register_subcommand(driver, account, cmdargs):
+    cmdargs.reserve_new_vms = False
+    vm_register('all', driver, account, cmdargs)       
+           
+##
+# Command line argument specification
+##
+desc="""This tool is used to manage VMs and related OpenStack resources (images, flavors, host aggregates, quotas, security-group rules)"""
+kilo=platform.dist()[1]=='21'
+parser = argparse.ArgumentParser(description=desc)
+subparsers = parser.add_subparsers()
+ipaddr = socket.gethostbyname(socket.getfqdn())
+reservation_server_url = os.environ.get('RESERVATION_SERVER', 'http://reservation.eng.riftio.com:80')
+# ipaddr = netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']
+#default_auth_url = 'http://%s:5000/v3/' % ipaddr
+default_auth_url = 'http://10.66.4.27:5000/v3/'
+
+parser.add_argument('-t', '--provider-type', dest='provider_type',
+                    type=str, default='OPENSTACK', 
+                    help='Cloud provider type (default: %(default)s)')
+parser.add_argument('-u', '--user-name', dest='user', 
+                    type=str, default='demo', 
+                    help='User name (default: %(default)s)')
+parser.add_argument('-p', '--password', dest='passwd', 
+                    type=str, default='mypasswd', 
+                    help='Password (default: %(default)s)')
+parser.add_argument('-m', '--mgmt-nw', dest='mgmt_network', 
+                    type=str, default='private', 
+                    help='mgmt-network (default: %(default)s)')
+parser.add_argument('-a', '--auth-url', dest='auth_url',
+                    type=str, default=default_auth_url,
+                    help='Keystone authentication URL (default: %(default)s)')
+parser.add_argument('-r', '--reservation_server_url', dest='reservation_server_url', 
+                    type=str, default=reservation_server_url, 
+                    help='reservation server url, use None to disable (default %(default)s)' )
+parser.add_argument('-d', '--debug', dest='debug', action='store_true', help='raise the logging level')
+
+##
+# Subparser for VM
+##
+vm_parser = subparsers.add_parser('vm')
+vm_subparsers = vm_parser.add_subparsers()
+
+# Create VM subparser
+vm_create_parser = vm_subparsers.add_parser('create')
+vm_create_parser.add_argument('-c', '--count',
+                              type=int, default=1,
+                              help='The number of VMs to launch '
+                                   '(default: %(default)d)')
+vm_create_parser.add_argument('-i', '--image',
+                              default='rwopenstack_vm',
+                              help='Specify the image for the VM (default: %(default)s)')
+vm_create_parser.add_argument('-n', '--name',
+                              help='Specify the name of the VM')
+vm_create_parser.add_argument('-f', '--flavor',
+                              help='Specify the flavor for the VM')
+vm_create_parser.add_argument('-R', '--reserve', dest='reserve_new_vms', 
+                    action='store_true', help='reserve any newly created VMs')
+vm_create_parser.add_argument('-s', '--single', dest='wait_after_create', 
+                    action='store_true', help='wait for each VM to start before creating the next')
+vm_create_parser.add_argument('-N', '--networks', dest='networks', type=str, 
+                                default='private,private2,private3,private4',
+                                help='comma separated list of networks to connect these VMs to (default: %(default)s)' )
+
+vm_create_parser.set_defaults(which='create')
+# Reboot VM subparser
+vm_reboot_parser = vm_subparsers.add_parser('reboot')
+group = vm_reboot_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--vm-name', dest='vm_name',
+                   type=str,
+                   help='Specify the name of the VM')
+group.add_argument('-a', '--reboot-all', 
+                   dest='reboot_all', action='store_true',
+                   help='Reboot all VMs')
+vm_reboot_parser.add_argument('-s', '--sleep', dest='sleep_time', type=int, default=4, help='time in seconds to sleep between reboots')
+vm_reboot_parser.set_defaults(which='reboot')
+
+
+"""
+# start VM subparser
+vm_start_parser = vm_subparsers.add_parser('start')
+group = vm_start_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--vm-name', dest='vm_name',
+                   type=str,
+                   help='Specify the name of the VM')
+group.add_argument('-a', '--start-all', 
+                   dest='start_all', action='store_true',
+                   help='Start all VMs')
+vm_start_parser.set_defaults(which='start')
+"""
+
+# Destroy VM subparser
+vm_destroy_parser = vm_subparsers.add_parser('destroy')
+group = vm_destroy_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--vm-name', dest='vm_name',
+                   type=str,
+                   help='Specify the name of the VM (accepts regular expressions)')
+group.add_argument('-a', '--destroy-all', 
+                   dest='destroy_all', action='store_true',
+                   help='Delete all VMs')
+group.add_argument('-w', '--wait', 
+                   dest='wait', action='store_true',
+                   help='destroy all and wait until all VMs have exited')
+vm_destroy_parser.set_defaults(which='destroy')
+
+# Rebuild VM subparser
+vm_rebuild_parser = vm_subparsers.add_parser('rebuild')
+group = vm_rebuild_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--vm-name', dest='vm_name',
+                   type=str,
+                   help='Specify the name of the VM (accepts regular expressions)')
+group.add_argument('-a', '--rebuild-all', 
+                   dest='rebuild_all', action='store_true',
+                   help='Rebuild all VMs')
+vm_rebuild_parser.add_argument('-i', '--image-name', dest='image_name',
+                              type=str,
+                              help='Specify the name of the image')
+vm_rebuild_parser.set_defaults(which='rebuild')
+
+# List VM subparser
+vm_list_parser = vm_subparsers.add_parser('list')
+vm_list_parser.set_defaults(which='list')
+vm_list_parser.add_argument('-i', '--ips_only', dest='ipsonly', 
+                            action='store_true', 
+                            help='only list IP addresses')
+
+vm_parser.set_defaults(func=vm_subcommand)
+
+##
+# Subparser for image
+##
+image_parser = subparsers.add_parser('image')
+image_subparsers = image_parser.add_subparsers()
+
+# List image subparser
+image_list_parser = image_subparsers.add_parser('list')
+image_list_parser.set_defaults(which='list')
+
+# Delete image subparser
+image_destroy_parser = image_subparsers.add_parser('delete')
+group = image_destroy_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--image-name', dest='image_name',
+                   type=str,
+                   help='Specify the name of the image')
+group.add_argument('-a', '--delete-all', 
+                   dest='delete_all', action='store_true',
+                   help='Delete all images')
+image_destroy_parser.set_defaults(which='delete')
+
+# create image
+image_create_parser = image_subparsers.add_parser('create')
+image_create_parser.set_defaults(which='create')
+image_create_parser.add_argument('-n', '--image-name', dest='image_name',
+                                  type=str,
+                                  default="rwopenstack_vm",
+                                  help='Specify the name of the image')
+image_create_parser.add_argument('-f', '--filename', dest='file_name',
+                                  type=str, 
+                                  default='/net/sharedfiles/home1/common/vm/rift-root-current.qcow2',
+                                  help='name of the existing qcow2 image file')
+
+
+image_create_parser = image_subparsers.add_parser('getid')
+image_create_parser.set_defaults(which='getid')
+image_create_parser.add_argument('-n', '--image-name', dest='image_name',
+                                  type=str,
+                                  default="rwopenstack_vm",
+                                  help='Specify the name of the image')
+image_parser.set_defaults(func=image_subcommand)
+
+##
+# Subparser for flavor
+##
+flavor_parser = subparsers.add_parser('flavor')
+flavor_subparsers = flavor_parser.add_subparsers()
+
+# List flavor subparser
+flavor_list_parser = flavor_subparsers.add_parser('list')
+flavor_list_parser.set_defaults(which='list')
+
+# Create flavor subparser
+flavor_create_parser = flavor_subparsers.add_parser('create')
+flavor_create_parser.set_defaults(which='create')
+flavor_create_parser.add_argument('-n', '--flavor-name', dest='flavor_name',
+                                  type=str,
+                                  help='Specify the name of the flavor')
+flavor_create_parser.add_argument('-m', '--memory-size', dest='memory_size',
+                                  type=int, default=1024,
+                                  help='Specify the size of the memory in MB '
+                                       '(default: %(default)d)')
+flavor_create_parser.add_argument('-d', '--disc-size', dest='disc_size',
+                                  type=int, default=16,
+                                  help='Specify the size of the disc in GB '
+                                       '(default: %(default)d)')
+flavor_create_parser.add_argument('-v', '--vcpu-count', dest='vcpu_count',
+                                  type=int, default=1,
+                                  help='Specify the number of VCPUs '
+                                       '(default: %(default)d)')
+flavor_create_parser.add_argument('-p', '--pci-count', dest='pci_count',
+                                  type=int, default=0,
+                                  help='Specify the number of PCI devices '
+                                       '(default: %(default)d)')
+flavor_create_parser.add_argument('-s', '--pci-speed', dest='pci_speed',
+                                  type=int, default=10,
+                                  help='Specify the speed of the PCI devices in Gbps (default: %(default)d)')
+flavor_create_parser.add_argument('-e', '--hostagg-extra-specs', dest='extra_specs',
+                                  type=str, 
+                                  help='Specify the extra spec ')
+flavor_create_parser.add_argument('-b', '--back-with-hugepages', dest='enable_hugepages',
+                                  action='store_true',
+                                  help='Enable memory backing with hugepages')
+flavor_create_parser.add_argument('-B', '--back-with-hugepages-kilo', dest='hugepages_kilo',
+                                  type=str,
+                                  help='Enable memory backing with hugepages for kilo')
+flavor_create_parser.add_argument('-D', '--dedicated_cpu', dest='dedicated_cpu',
+                                  action='store_true',
+                                  help='Dedicated CPU usage')
+flavor_create_parser.add_argument('-T', '--cpu_threads', dest='cpu_threads',
+                                  type=str, 
+                                  help='CPU threads usage')
+flavor_create_parser.add_argument('-N', '--numa_nodes', dest='numa_nodes',
+                                  type=int, 
+                                  help='Configure numa nodes')
+flavor_create_parser.add_argument('-t', '--trusted-host', dest='trusted_host',  action='store_true', help='restrict instances to trusted hosts')
+flavor_create_parser.add_argument('-c', '--crypto-cards', dest='colleto',  type=int, default=0,  \
+                                    help='how many colleto creek VFs should be passed thru to the VM')
+
+# Delete flavor subparser
+flavor_delete_parser = flavor_subparsers.add_parser('delete')
+flavor_delete_parser.set_defaults(which='delete')
+flavor_delete_parser.add_argument('-n', '--flavor-name', dest='flavor_name',
+                                  type=str,
+                                  help='Specify the name of the flavor')
+
+flavor_parser.set_defaults(func=flavor_subcommand)
+
+##
+# Subparser for host-aggregate 
+##
+hostagg_parser = subparsers.add_parser('hostagg')
+hostagg_subparsers = hostagg_parser.add_subparsers()
+
+# List host-aggregate subparser
+hostagg_list_parser = hostagg_subparsers.add_parser('list')
+hostagg_list_parser.set_defaults(which='list')
+
+# Create hostagg subparser
+hostagg_create_parser = hostagg_subparsers.add_parser('create')
+hostagg_create_parser.set_defaults(which='create')
+hostagg_create_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_create_parser.add_argument('-a', '--avail-zone', dest='avail_zone',
+                                  type=str,
+                                  help='Specify the name of the availability zone')
+# Delete hostagg subparser
+hostagg_delete_parser = hostagg_subparsers.add_parser('delete')
+hostagg_delete_parser.set_defaults(which='delete')
+hostagg_delete_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_delete_parser.add_argument('-f', '--force-delete-hosts', dest='force_delete_hosts',
+                                  action='store_true',
+                                  help='Delete the existing hosts')
+
+# Add host subparser
+hostagg_addhost_parser = hostagg_subparsers.add_parser('addhost')
+hostagg_addhost_parser.set_defaults(which='addhost')
+hostagg_addhost_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_addhost_parser.add_argument('-c', '--compute-host-name', dest='host_name',
+                                  type=str,
+                                  help='Specify the name of the host to be added')
+
+# Remove host subparser
+hostagg_delhost_parser = hostagg_subparsers.add_parser('delhost')
+hostagg_delhost_parser.set_defaults(which='delhost')
+hostagg_delhost_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_delhost_parser.add_argument('-c', '--compute-host-name', dest='host_name',
+                                  type=str,
+                                  help='Specify the name of the host to be removed')
+
+# Set meta-data subparser
+hostagg_setdata_parser = hostagg_subparsers.add_parser('setmetadata')
+hostagg_setdata_parser.set_defaults(which='setmetadata')
+hostagg_setdata_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_setdata_parser.add_argument('-d', '--meta-data', dest='extra_specs',
+                                  type=str,
+                                  help='Specify the meta-data to be associated to this host aggregate')
+
+hostagg_parser.set_defaults(func=hostagg_subcommand)
+
+##
+# Subparser for quota
+##
+quota_parser = subparsers.add_parser('quota')
+quota_subparser = quota_parser.add_subparsers()
+quota_set_parser = quota_subparser.add_parser('set')
+
+# quota set subparser
+quota_set_parser.set_defaults(which='set')
+quota_set_parser.add_argument('-p', '--project', dest='project', 
+                              type=str, default='demo', 
+                              help='project name that you wish to set '
+                                   'the quotas for')
+quota_set_parser.add_argument('-c', '--vcpus', dest='vcpus', 
+                              type=int, default=48, 
+                              help='Maximum number of virtual CPUs that can '
+                                   'be assigned to all VMs in aggregate')
+quota_set_parser.add_argument('-v', '--vms', dest='vms', 
+                              type=int, default=24, 
+                              help='Maximum number of VMs that can be created ' 
+                                   'on this openstack instance '
+                                   '(which may be more than 1 machine)')
+quota_set_parser.add_argument('-i', '--ips', dest='ips', 
+                              type=int, default=250, 
+                              help='Maximum number of Floating IP Addresses '
+                                   'that can be assigned to all VMs '
+                                   'in aggregate')
+quota_set_parser.add_argument('-m', '--memory', dest='memory', 
+                              type=int, default=122880, 
+                              help='Maximum amount of RAM in MB that can be '
+                                   'assigned to all VMs in aggregate')
+
+# quota get subparser
+quota_get_parser = quota_subparser.add_parser('get')
+quota_get_parser.add_argument('-p', '--project', dest='project', 
+                              type=str, default='demo', 
+                              help='project name that you wish to get '
+                                   'the quotas for')
+quota_get_parser.set_defaults(which='get')
+quota_parser.set_defaults(func=quota_subcommand)
+
+##
+# rules subparser
+##
+rules_parser = subparsers.add_parser('rules')
+rules_parser.set_defaults(func=rules_subcommand)
+rules_subparser = rules_parser.add_subparsers()
+rules_set_parser = rules_subparser.add_parser('set')
+rules_set_parser.set_defaults(which='set')
+rules_list_parser = rules_subparser.add_parser('list')
+rules_list_parser.set_defaults(which='list')
+
+register_parser = subparsers.add_parser('register')
+register_parser.set_defaults(func=register_subcommand)
+cmdargs = parser.parse_args()
+
+
+if __name__ == "__main__":
+    logger=logging.getLogger(__name__)
+    if cmdargs.debug:
+        logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s', level=logging.DEBUG) 
+    else:
+        logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s', level=logging.WARNING) 
+
+    if cmdargs.provider_type == 'OPENSTACK':
+        #cls = get_driver(Provider.OPENSTACK)
+        pass
+    elif cmdargs.provider_type == 'VSPHERE':
+        cls = get_driver(Provider.VSPHERE)
+    else:
+        sys.exit("Cloud provider %s is NOT supported yet" % cmdargs.provider_type)
+
+    if cmdargs.reservation_server_url == "None" or cmdargs.reservation_server_url == "":
+        cmdargs.reservation_server_url = None
+    if cmdargs.reservation_server_url is not None:
+        sys.path.append('/usr/rift/lib')
+        try:
+            import ndl
+        except Exception as e:
+            logger.warning("Error loading Reservation library")
+            testbed=None
+        else:
+            testbed=ndl.Testbed()
+            testbed.set_server(cmdargs.reservation_server_url)
+    else:
+        testbed = None
+
+
+
+    if cmdargs.provider_type == 'OPENSTACK':
+        account                        = RwcalYang.CloudAccount()
+        account.account_type           = "openstack"
+        account.openstack.key          = cmdargs.user
+        account.openstack.secret       = cmdargs.passwd
+        account.openstack.auth_url     = cmdargs.auth_url
+        account.openstack.tenant       = cmdargs.user
+        account.openstack.mgmt_network = cmdargs.mgmt_network
+
+        plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+        engine, info, extension = plugin()
+        driver = plugin.get_interface("Cloud")
+        # Get the RwLogger context
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+        try:
+            rc = driver.init(rwloggerctx)
+            assert rc == RwStatus.SUCCESS
+        except:
+            logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+        else:
+            logger.info("Openstack Cal plugin successfully instantiated")
+
+        cmdargs.func(driver, account, cmdargs)
+
+    elif cmdargs.provider_type == 'VSPHERE':
+        driver = cls(cmdargs.user, cmdargs.passwd, host='vcenter' )
+        cmdargs.func(driver, cmdargs)
diff --git a/modules/core/rwvx/rwcal/test/ec2.py b/modules/core/rwvx/rwcal/test/ec2.py
new file mode 100644 (file)
index 0000000..4709500
--- /dev/null
@@ -0,0 +1,263 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import glob
+import itertools
+import os
+
+import boto
+import boto.vpc
+
+# TODO:  Pull the latest of the owned instances.
+__default_instance_ami__ = 'ami-e421bc8c'
+
+# TODO:  Make VPC's per user?
+__default_subnet__ = 'subnet-4b484363'
+__default_security_group__ = 'sg-d9da90bc'
+
+__default_instance_type__ = 'm1.medium'
+__default_vpc__ = 'vpc-e7ed4482'
+
+class RWEC2(object):
+    def __init__(self,  subnet=None, ami=None):
+        self._subnet = subnet if subnet is not None else __default_subnet__
+        self._ami = ami if ami is not None else __default_instance_ami__
+
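+        # AWS credentials are picked up from the standard boto configuration (environment variables or ~/.boto)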
+        self._conn = boto.connect_ec2()
+
+    @staticmethod
+    def cloud_init_current_user():
+        """
+        Return user_data configuration suitable for cloud-init that will create a user
+        with sudo and ssh key access on the remote instance.
+
+        ssh keys are found with the glob ~/.ssh/*pub*
+        """
+        user_data = "users:\n"
+        user_data += " - name: %s\n" % (os.getlogin(),)
+        user_data += "   groups: [wheel, adm, systemd-journal]\n"
+        user_data += "   sudo: [\"ALL=(ALL) NOPASSWD:ALL\"]\n"
+        user_data += "   shell: /bin/bash\n"
+        user_data += "   ssh_authorized_keys:\n"
+        for pub_key in glob.glob('%s/.ssh/*pub*' % (os.environ['HOME'],)):
+            with open(pub_key) as fp:
+                user_data += "    -  %s" % (fp.read(),)
+
+        return user_data
+
+
+    @staticmethod
+    def cloud_init_yum_repos():
+        """
+        Return a string of user_data commands that can be used to update the yum
+        repos to point to the correct location.  They should be added by the caller
+        within a 'runcmd:' block.
+        """
+        ret = " - sed -i -e 's,www\.,,' -e 's,riftio\.com/mirrors,riftio.com:8881,' /etc/yum.repos.d/*.repo\n"
+        return ret
+
+    def instances(self, cluster_component, cluster_instance):
+        """
+        List of instances owned by the given cluster instance
+
+        @param cluster_component  - parent cluster of each instance
+        @param cluster_instance   - instance id of the owning cluster
+        @param n_instances        - number of requested instances
+
+        @return                   - list of boto.ec2.instance.Instances provisioned
+        """
+        ret = []
+        reservations = self._conn.get_all_instances()
+        for instance in [instance for reservation in reservations for instance in reservation.instances]:
+            tags = instance.tags
+            if (tags.get('parent_component') == cluster_component
+                    and tags.get('parent_instance') == cluster_instance):
+                ret.append(instance)
+
+        return ret
+
+    def provision_master(self, cluster_component, cluster_instance):
+        """
+        Provision a master instance in EC2.  The master instance is a special instance with the
+        following features:
+            - Public IP
+            - /home shared over NFS
+
+        @param cluster_component  - parent cluster of each instance
+        @param cluster_instance   - instance id of the owning cluster
+
+        @return                   - boto.ec2.instance.Instances provisioned
+        """
+        vpc = boto.vpc.VPCConnection()
+        subnet = vpc.get_all_subnets(subnet_ids=__default_subnet__)[0]
+        cidr_block = subnet.cidr_block
+        vpc.close()
+
+        user_data = "#cloud-config\n"
+        user_data += "runcmd:\n"
+        user_data += " - echo '/home %s(rw,root_squash,sync)' >  /etc/exports\n" % (cidr_block,)
+        user_data += " - systemctl start nfs-server\n"
+        user_data += " - systemctl enable nfs-server\n"
+        user_data += self.cloud_init_yum_repos()
+        user_data += self.cloud_init_current_user()
+
+
+        net_if = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+                subnet_id=__default_subnet__,
+                groups=[__default_security_group__,],
+                associate_public_ip_address=True)
+
+        net_ifs = boto.ec2.networkinterface.NetworkInterfaceCollection(net_if)
+
+        new_reservation = self._conn.run_instances(
+                image_id=self._ami,
+                min_count=1,
+                max_count=1,
+                instance_type=__default_instance_type__,
+                network_interfaces=net_ifs,
+                tenancy='default',
+                user_data=user_data)
+        instance = new_reservation.instances[0]
+
+        instance.add_tag('parent_component', cluster_component)
+        instance.add_tag('parent_instance', cluster_instance)
+        instance.add_tag('master', 'self')
+
+        return instance
+
+
+    def provision(self, cluster_component, cluster_instance, n_instances=1, master_instance=None, net_ifs=None):
+        """
+        Provision a number of EC2 instanced to be used in a cluster.
+
+        @param cluster_component  - parent cluster of each instance
+        @param cluster_instance   - instance id of the owning cluster
+        @param n_instances        - number of requested instances
+        @param master_instance    - if specified, the boto.ec2.instance.Instance that is providing master
+                                    services for this cluster
+
+        @return                   - list of boto.ec2.instance.Instances provisioned
+        """
+        instances = []
+        cluster_instance = int(cluster_instance)
+
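+        # Tag each provisioned instance with its owning cluster (and master, if any) so instances() can find it later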
+        def posess_instance(instance):
+            instances.append(instance)
+            instance.add_tag('parent_component', cluster_component)
+            instance.add_tag('parent_instance', cluster_instance)
+            if master_instance is not None:
+                instance.add_tag('master', master_instance.id)
+            else:
+                instance.add_tag('master', 'None')
+
+        user_data = "#cloud-config\n"
+        user_data += self.cloud_init_current_user()
+        user_data += "runcmd:\n"
+        user_data += self.cloud_init_yum_repos()
+
+        if master_instance is not None:
+            user_data += " - echo '%s:/home /home nfs rw,soft,sync 0 0' >> /etc/fstab\n" % (
+                    master_instance.private_ip_address,)
+            user_data += " - mount /home\n"
+
+        if net_ifs is None:
+            kwds = {'subnet_id': __default_subnet__}
+        else:
+            kwds = {'network_interfaces': net_ifs}
+            print(net_ifs)
+
+        new_reservation = self._conn.run_instances(
+            image_id=self._ami,
+            min_count=n_instances,
+            max_count=n_instances,
+            instance_type=__default_instance_type__,
+            tenancy='default',
+            user_data=user_data,
+            network_interfaces=net_ifs)
+
+        _ = [posess_instance(i) for i in new_reservation.instances]
+
+        return instances
+
+    def stop(self, instance_id, free_resources=True):
+        """
+        Stop the specified instance, freeing all allocated resources (elastic ips, etc) if requested.
+
+        @param instance_id      - name of the instance to stop
+        @param free_resource    - If True that all resources that were only owned by this instance
+                                  will be deallocated as well.
+        """
+        self._conn.terminate_instances(instance_ids=[instance_id,])
+
+    def fastpath111(self):
+        vpc_conn = boto.vpc.VPCConnection()
+        vpc = vpc_conn.get_all_vpcs(vpc_ids=[__default_vpc__,])[0]
+        subnet_addrs_split = vpc.cidr_block.split('.')
+
+        networks = {
+            'mgmt': [s for s in vpc_conn.get_all_subnets() if s.id == __default_subnet__][0],
+            'tg_fabric': None,
+            'ts_fabric': None,
+            'tg_lb_ext': None,
+            'lb_ts_ext': None,
+        }
+
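+        # Allocate a /25 from the VPC CIDR for each fabric network that has no subnet yet, reusing an existing subnet on conflict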
+        for i, network in enumerate([n for n, s in networks.items() if s is None]):
+            addr = "%s.%s.10%d.0/25" % (subnet_addrs_split[0], subnet_addrs_split[1], i)
+            try:
+                subnet = vpc_conn.create_subnet(vpc.id, addr)
+            except boto.exception.EC2ResponseError as e:
+                if 'InvalidSubnet.Conflict' == e.error_code:
+                    subnet = vpc_conn.get_all_subnets(filters=[('vpcId', vpc.id), ('cidrBlock', addr)])[0]
+                else:
+                    raise
+
+            networks[network] = subnet
+
+        def create_interfaces(nets):
+            ret = boto.ec2.networkinterface.NetworkInterfaceCollection()
+
+            for i, network in enumerate(nets):
+                spec = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+                        subnet_id=networks[network].id,
+                        description='%s iface' % (network,),
+                        groups=[__default_security_group__],
+                        device_index=i)
+                ret.append(spec)
+
+            return ret
+
+        ret = {}
+
+        ret['cli'] = self.provision_master('fp111', 1)
+        ret['cli'].add_tag('Name', 'cli')
+
+        net_ifs = create_interfaces(['mgmt'])
+        ret['mgmt'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['mgmt'].add_tag('Name', 'mgmt')
+
+        net_ifs = create_interfaces(['mgmt', 'tg_fabric'])
+        ret['tg1'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['tg1'].add_tag('Name', 'tg1')
+
+        net_ifs = create_interfaces(['mgmt', 'tg_fabric', 'tg_lb_ext'])
+        ret['tg2'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['tg2'].add_tag('Name', 'tg2')
+
+        net_ifs = create_interfaces(['mgmt', 'ts_fabric'])
+        ret['ts1'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['ts1'].add_tag('Name', 'ts1')
+
+        net_ifs = create_interfaces(['mgmt', 'ts_fabric', 'lb_ts_ext'])
+        ret['ts3'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['ts3'].add_tag('Name', 'ts3')
+
+        net_ifs = create_interfaces(['mgmt', 'ts_fabric', 'lb_ts_ext', 'tg_lb_ext'])
+        ret['ts2'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['ts2'].add_tag('Name', 'ts2')
+
+        return ret
+
+# vim: sw=4
diff --git a/modules/core/rwvx/rwcal/test/openstack_resources.py b/modules/core/rwvx/rwcal/test/openstack_resources.py
new file mode 100755 (executable)
index 0000000..45c0ff1
--- /dev/null
@@ -0,0 +1,468 @@
+#!/usr/bin/env python3
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import logging
+import rw_peas
+import rwlogger
+import time
+import argparse
+import os
+import sys
+import uuid
+from os.path import basename
+
+FLAVOR_NAME = 'm1.medium'
+DEFAULT_IMAGE='/net/sharedfiles/home1/common/vm/rift-root-latest.qcow2'
+
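+# Resources named here are treated as persistent and are skipped by the cleanup helpers below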
+persistent_resources = {
+    'vms'      : ['mission_control','launchpad',],
+    'networks' : ['public', 'private', 'multisite'],
+    'flavors'  : ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge'],
+    'images'   : ['rwimage','rift-root-latest.qcow2','rift-root-latest-trafgen.qcow2', 'rift-root-latest-trafgen-f.qcow2']
+}
+
+#
+# Important information about openstack installation. This needs to be manually verified 
+#
+openstack_info = {
+    'username'           : 'pluto',
+    'password'           : 'mypasswd',
+    'project_name'       : 'demo',
+    'mgmt_network'       : 'private',
+    'physical_network'   : 'physnet1',
+    'network_type'       : 'VLAN',
+    'segmentation_id'    : 42, ### What else?
+    'subnets'            : ["11.0.0.0/24", "12.0.0.0/24", "13.0.0.0/24", "14.0.0.0/24"],
+    'subnet_index'       : 0,
+    }
+
+
+logging.basicConfig(level=logging.INFO)
+
+USERDATA_FILENAME = os.path.join(os.environ['RIFT_INSTALL'],
+                                 'etc/userdata-template')
+
+
+RIFT_BASE_USERDATA = '''
+#cloud-config
+runcmd:
+ - sleep 5
+ - /usr/rift/scripts/cloud/enable_lab
+ - /usr/rift/etc/fix_this_vm
+'''
+
+try:
+    fd = open(USERDATA_FILENAME, 'r')
+except Exception as e:
+    #logger.error("Received exception during opening of userdata (%s) file. Exception: %s" %(USERDATA_FILENAME, str(e)))
+    sys.exit(-1)
+else:
+    LP_USERDATA_FILE = fd.read()
+    # Run the enable lab script when the openstack vm comes up
+    LP_USERDATA_FILE += "runcmd:\n"
+    LP_USERDATA_FILE += " - /usr/rift/scripts/cloud/enable_lab\n"
+    LP_USERDATA_FILE += " - /usr/rift/etc/fix_this_vm\n"
+
+
+
+def get_cal_plugin():
+    """
+    Loads rw.cal plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+    engine, info, extension = plugin()
+    cal = plugin.get_interface("Cloud")
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+    try:
+        rc = cal.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except:
+        logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("Openstack Cal plugin successfully instantiated")
+        return cal 
+    
+def get_cal_account(auth_url):
+    """
+    Returns cal account
+    """
+    account                        = RwcalYang.CloudAccount()
+    account.account_type           = "openstack"
+    account.openstack.key          = openstack_info['username']
+    account.openstack.secret       = openstack_info['password']
+    account.openstack.auth_url     = auth_url
+    account.openstack.tenant       = openstack_info['project_name']
+    account.openstack.mgmt_network = openstack_info['mgmt_network']
+    return account
+
+
+logger = logging.getLogger('rift.cal.openstackresources')
+
+class OpenstackResources(object):
+    """
+    A simple helper class to manage a bunch of openstack resources
+    """
+    def __init__(self, controller):    
+        self._cal      = get_cal_plugin()
+        self._acct     = get_cal_account('http://'+controller+':5000/v3/')
+        self._id       = 0
+        self._image_id = None
+        self._flavor_id = None
+        
+    def _destroy_vms(self):
+        """
+        Destroy VMs
+        """
+        logger.info("Initiating VM cleanup")
+        rc, rsp = self._cal.get_vdu_list(self._acct)
+        vdu_list = [vm for vm in rsp.vdu_info_list if vm.name not in persistent_resources['vms']]
+        logger.info("Deleting VMs : %s" %([x.name for x in vdu_list]))
+        
+        for vdu in vdu_list:
+            self._cal.delete_vdu(self._acct, vdu.vdu_id)
+
+        logger.info("VM cleanup complete")
+
+    def _destroy_networks(self):
+        """
+        Destroy Networks
+        """
+        logger.info("Initiating Network cleanup")
+        rc, rsp = self._cal.get_virtual_link_list(self._acct)
+        vlink_list = [vlink for vlink in rsp.virtual_link_info_list if vlink.name not in persistent_resources['networks']]
+
+        logger.info("Deleting Networks : %s" %([x.name for x in vlink_list]))
+        for vlink in vlink_list:
+            self._cal.delete_virtual_link(self._acct, vlink.virtual_link_id)
+        logger.info("Network cleanup complete")
+
+    def _destroy_flavors(self):
+        """
+        Destroy Flavors
+        """
+        logger.info("Initiating flavor cleanup")
+        rc, rsp = self._cal.get_flavor_list(self._acct)
+        flavor_list = [flavor for flavor in rsp.flavorinfo_list if flavor.name not in persistent_resources['flavors']]
+            
+        logger.info("Deleting flavors : %s" %([x.name for x in flavor_list]))
+
+        for flavor in flavor_list:
+            self._cal.delete_flavor(self._acct, flavor.id)
+            
+        logger.info("Flavor cleanup complete")
+
+    def _destroy_images(self):
+        logger.info("Initiating image cleanup")
+        rc, rsp = self._cal.get_image_list(self._acct)
+        image_list = [image for image in rsp.imageinfo_list if image.name not in persistent_resources['images']]
+
+        logger.info("Deleting images : %s" %([x.name for x in image_list]))
+            
+        for image in image_list:
+            self._cal.delete_image(self._acct, image.id)
+            
+        logger.info("Image cleanup complete")
+        
+    def destroy_resource(self):
+        """
+        Destroy resources
+        """
+        logger.info("Cleaning up openstack resources")
+        self._destroy_vms()
+        self._destroy_networks()
+        self._destroy_flavors()
+        self._destroy_images()
+        logger.info("Cleaning up openstack resources.......[Done]")
+
+    def create_mission_control(self):
+        vm_id = self.create_vm('mission_control',
+                               userdata = RIFT_BASE_USERDATA)
+        return vm_id
+    
+
+    def create_launchpad_vm(self, salt_master=None):
+        node_id = str(uuid.uuid4())
+        if salt_master is not None:
+            userdata = LP_USERDATA_FILE.format(master_ip = salt_master,
+                                               lxcname = node_id)
+        else:
+            userdata = RIFT_BASE_USERDATA
+
+        vm_id = self.create_vm('launchpad',
+                              userdata = userdata,
+                              node_id = node_id)
+#        vm_id = self.create_vm('launchpad2',
+#                               userdata = userdata,
+#                               node_id = node_id)
+        return vm_id
+    
+    def create_vm(self, name, userdata, node_id = None):
+        """
+        Creates a VM with the given name, userdata, and optional node_id.
+
+        """
+        vm = RwcalYang.VDUInitParams()
+        vm.name = name
+        vm.flavor_id = self._flavor_id
+        vm.image_id  = self._image_id
+        if node_id is not None:
+            vm.node_id = node_id
+        vm.vdu_init.userdata = userdata
+        vm.allocate_public_address = True
+        logger.info("Starting a VM with parameter: %s" %(vm))
+     
+        rc, vm_id = self._cal.create_vdu(self._acct, vm)
+        assert rc == RwStatus.SUCCESS
+        logger.info('Created vm: %s with id: %s', name, vm_id)
+        return vm_id
+        
+    def create_network(self, name):
+        logger.info("Creating network with name: %s" %name)
+        network                = RwcalYang.NetworkInfoItem()
+        network.network_name   = name
+        network.subnet         = openstack_info['subnets'][openstack_info['subnet_index']]
+
+        # Advance the subnet index for the next call, wrapping around so we
+        # never index past the end of the configured subnet list.
+        openstack_info['subnet_index'] += 1
+        if openstack_info['subnet_index'] >= len(openstack_info['subnets']):
+            openstack_info['subnet_index'] = 0
+        
+        if openstack_info['physical_network']:
+            network.provider_network.physical_network = openstack_info['physical_network']
+        if openstack_info['network_type']:
+            network.provider_network.overlay_type     = openstack_info['network_type']
+        if openstack_info['segmentation_id']:
+            network.provider_network.segmentation_id  = openstack_info['segmentation_id']
+            openstack_info['segmentation_id'] += 1
+
+        rc, net_id = self._cal.create_network(self._acct, network)
+        assert rc == RwStatus.SUCCESS
+
+        logger.info("Successfully created network with id: %s" %net_id)
+        return net_id
+    
+        
+
+    def create_image(self, location):
+        img = RwcalYang.ImageInfoItem()
+        img.name = basename(location)
+        img.location = location
+        img.disk_format = "qcow2"
+        img.container_format = "bare"
+
+        logger.info("Uploading image : %s" %img.name)
+        rc, img_id = self._cal.create_image(self._acct, img)
+        assert rc == RwStatus.SUCCESS
+
+        rs = None
+        rc = None
+        image = None
+        for i in range(100):
+            rc, rs = self._cal.get_image(self._acct, img_id)
+            assert rc == RwStatus.SUCCESS
+            logger.info("Image (image_id: %s) reached status : %s" %(img_id, rs.state))
+            if rs.state == 'active':
+                image = rs
+                break
+            else:
+                time.sleep(2) # Sleep for 2 seconds before polling again
+
+        if image is None:
+            logger.error("Failed to upload openstack image: %s", img)
+            sys.exit(1)
+
+        self._image_id = img_id
+        logger.info("Uploading image.......[Done]")
+        
+    def create_flavor(self):
+        """
+        Create Flavor suitable for rift_ping_pong VNF
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name = FLAVOR_NAME
+        flavor.vm_flavor.memory_mb   = 16384 # 16GB
+        flavor.vm_flavor.vcpu_count  = 4 
+        flavor.vm_flavor.storage_gb  = 20 # 20 GB
+
+        logger.info("Creating new flavor. Flavor Info: %s" %str(flavor.vm_flavor))
+
+        rc, flavor_id = self._cal.create_flavor(self._acct, flavor)
+        assert rc == RwStatus.SUCCESS
+        logger.info("Creating new flavor.......[Done]")
+        return flavor_id
+
+    def find_image(self, name):
+        logger.info("Searching for uploaded image: %s" %name)
+        rc, rsp = self._cal.get_image_list(self._acct)
+        image_list = [image for image in rsp.imageinfo_list if image.name ==  name]
+
+        if not image_list:
+            logger.error("Image %s not found" %name)
+            return None
+
+        self._image_id = image_list[0].id
+        logger.info("Searching for uploaded image.......[Done]")
+        return self._image_id
+
+    def find_flavor(self, name=FLAVOR_NAME):
+        logger.info("Searching for required flavor: %s" %name)
+        rc, rsp = self._cal.get_flavor_list(self._acct)
+        flavor_list = [flavor for flavor in rsp.flavorinfo_list if flavor.name == name]
+
+        if not flavor_list:
+            logger.error("Flavor %s not found" %name)
+            self._flavor_id = self.create_flavor()
+        else:
+            self._flavor_id = flavor_list[0].id
+
+        logger.info("Searching for required flavor.......[Done]")
+        return self._flavor_id
+
+        
+    
+
+def main():
+    """
+    Main routine
+    """
+    parser = argparse.ArgumentParser(description='Script to manage openstack resources')
+    
+    parser.add_argument('--controller',
+                        action = 'store',
+                        dest = 'controller',
+                        type = str,
+                        help='IP Address of openstack controller. This is a mandatory parameter')
+
+    parser.add_argument('--cleanup',
+                        action = 'store',
+                        dest = 'cleanup',
+                        nargs = '+',
+                        type = str,
+                        help = 'Perform resource cleanup for openstack installation. \n Possible options are {all, flavors, vms, networks, images}')
+
+    parser.add_argument('--persist-vms',
+                        action = 'store',
+                        dest = 'persist_vms',
+                        help = 'VM instance name to persist')
+
+    parser.add_argument('--salt-master',
+                        action = 'store',
+                        dest = 'salt_master',
+                        type = str,
+                        help='IP Address of salt controller. Required, if VMs are being created.')
+
+    parser.add_argument('--upload-image',
+                        action = 'store',
+                        dest = 'upload_image',
+                        help='Openstack image location to upload and use when creating VMs.')
+
+    parser.add_argument('--use-image',
+                        action = 'store',
+                        dest = 'use_image',
+                        help='Image name to be used for VM creation')
+
+    parser.add_argument('--use-flavor',
+                        action = 'store',
+                        dest = 'use_flavor',
+                        help='Flavor name to be used for VM creation')
+    
+    parser.add_argument('--mission-control',
+                        action = 'store_true',
+                        dest = 'mission_control',
+                        help='Create Mission Control VM')
+
+
+    parser.add_argument('--launchpad',
+                        action = 'store_true',
+                        dest = 'launchpad',
+                        help='Create LaunchPad VM')
+
+    parser.add_argument('--use-project',
+                        action = 'store',
+                        dest = 'use_project',
+                        help='Project name to be used for VM creation')
+
+    parser.add_argument('--clean-mclp',
+                        action='store_true',
+                        dest='clean_mclp',
+                        help='Remove Mission Control and Launchpad VMs')
+
+    argument = parser.parse_args()
+
+    if argument.persist_vms is not None:
+        global persistent_resources
+        vm_name_list = argument.persist_vms.split(',')
+        for single_vm in vm_name_list:
+            persistent_resources['vms'].append(single_vm)
+        logger.info("persist-vms: %s" % persistent_resources['vms'])
+
+    if argument.clean_mclp:
+        persistent_resources['vms'] = []
+
+    if argument.controller is None:
+        logger.error('Need openstack controller IP address')
+        sys.exit(-1)
+
+    
+    if argument.use_project is not None:
+        openstack_info['project_name'] = argument.use_project
+
+    ### Start processing
+    logger.info("Instantiating cloud-abstraction-layer")
+    drv = OpenstackResources(argument.controller)
+    logger.info("Instantiating cloud-abstraction-layer.......[Done]")
+
+        
+    if argument.cleanup is not None:
+        for r_type in argument.cleanup:
+            if r_type == 'all':
+                drv.destroy_resource()
+                break
+            if r_type == 'images':
+                drv._destroy_images()
+            if r_type == 'flavors':
+                drv._destroy_flavors()
+            if r_type == 'vms':
+                drv._destroy_vms()
+            if r_type == 'networks':
+                drv._destroy_networks()
+
+    if argument.upload_image is not None:
+        image_name_list = argument.upload_image.split(',')
+        logger.info("Will upload %d iamge(s): %s" % (len(image_name_list), image_name_list))
+        for image_name in image_name_list:
+            drv.create_image(image_name)
+            #print("Uploaded :", image_name)
+
+    elif argument.use_image is not None:
+        img = drv.find_image(argument.use_image)
+        if img is None:
+            logger.error("Image: %s not found" %(argument.use_image))
+            sys.exit(-4)
+    else:
+        if argument.mission_control or argument.launchpad:
+            img = drv.find_image(basename(DEFAULT_IMAGE))
+            if img is None:
+                drv.create_image(DEFAULT_IMAGE)
+
+    if argument.use_flavor is not None:
+        drv.find_flavor(argument.use_flavor)
+    else:
+        drv.find_flavor()
+        
+    if argument.mission_control:
+        drv.create_mission_control()
+
+    if argument.launchpad:
+        drv.create_launchpad_vm(salt_master = argument.salt_master)
+        
+    
+if __name__ == '__main__':
+    main()
+        
diff --git a/modules/core/rwvx/rwcal/test/rwcal_callback_gtest.cpp b/modules/core/rwvx/rwcal/test/rwcal_callback_gtest.cpp
new file mode 100644 (file)
index 0000000..f3f85d1
--- /dev/null
@@ -0,0 +1,72 @@
+
+/*
+ * 
+ * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+ *
+ */
+
+
+
+#include <rwut.h>
+
+#include "rwcal-api.h"
+
+struct test_struct {
+  int accessed;
+};
+
+struct test_struct g_test_struct;
+
+#define RWCAL_RET_UD_IDX(ud, type, idx) ((type *)rwcal_get_userdata_idx(ud, idx))
+rw_status_t update_accessed(rwcal_module_ptr_t rwcal, void * ud, int len)
+{
+  struct test_struct * ts = RWCAL_RET_UD_IDX(ud, struct test_struct, 0);
+  ts->accessed++;
+  return RW_STATUS_SUCCESS;
+}
+
+class RWCalCallbackTest : public ::testing::Test {
+  /*
+   * This is a tough one to test as we're really relying on the
+   * gobject introspection to do all the data marshalling for us
+   * correctly.  At this point, all I can think of to do is to
+   * just create a closure and then call it the same way it would
+   * typically be called in C and make sure that everything
+   * executed as expected.
+   */
+ protected:
+  rwcal_module_ptr_t rwcal;
+
+  virtual void SetUp() {
+    rwcal = rwcal_module_alloc();
+    ASSERT_TRUE(rwcal);
+
+    g_test_struct.accessed = 0;
+  }
+
+  virtual void TearDown() {
+    rwcal_module_free(&rwcal);
+  }
+
+  virtual void TestSuccess() {
+    rwcal_closure_ptr_t closure;
+
+    closure = rwcal_closure_alloc(
+        rwcal,
+        &update_accessed,
+        (void *)&g_test_struct);
+    ASSERT_TRUE(closure);
+
+    ASSERT_EQ(g_test_struct.accessed, 0);
+    rw_cal_closure_callback(closure);
+    ASSERT_EQ(g_test_struct.accessed, 1);
+
+    rwcal_closure_free(&closure);
+    ASSERT_FALSE(closure);
+  }
+};
+
+
+TEST_F(RWCalCallbackTest, TestSuccess) {
+  TestSuccess();
+}
diff --git a/modules/core/rwvx/rwcal/test/rwcal_dump.cpp b/modules/core/rwvx/rwcal/test/rwcal_dump.cpp
new file mode 100644 (file)
index 0000000..3dc307d
--- /dev/null
@@ -0,0 +1,65 @@
+
+/*
+ * 
+ * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+ *
+ */
+
+
+
+/**
+ * @file rwcal_dump.cpp
+ * @author Jeremy Mordkoff
+ * @date 05/14/2015 
+ * @brief test program to dump what we can glean from an installation
+ */
+
+
+#include <limits.h>
+#include <cstdlib>
+#include <iostream>
+
+#include "rwcal-api.h"
+
+
+int main(int argc, char ** argv, char ** envp)
+{
+
+#if 0
+    rw_status_t status;
+    rwcal_module_ptr_t m_mod;
+    Rwcal__YangData__Rwcal__Flavorinfo__FlavorinfoList  *flavor;
+    rwpb_gi_Rwcal_FlavorInfo *flavors;
+    Rwcal__YangData__Rwcal__Flavorinfo *flavorinfo;
+    unsigned int i;
+    char url[128];
+
+    if (argc != 4 ) {
+       fprintf(stderr, "args are IP user password\n");
+       return(1);
+    }
+    snprintf(url, 128, "http://%s:35357/v2.0/tokens", argv[1] );
+
+    m_mod = rwcal_module_alloc();
+    status = rwcal_cloud_init(m_mod, RW_MANIFEST_RWCAL_CLOUD_TYPE_OPENSTACK_AUTH_URL, argv[2], argv[3], url );
+    if (status != RW_STATUS_SUCCESS)
+      return status;
+
+    status = rwcal_cloud_flavor_infos(m_mod, &flavors);
+    if (status != RW_STATUS_SUCCESS)
+      return status;
+    flavorinfo = flavors->s.message;
+    printf("ID                                       NAME             MEM    DISK VCPU PCI  HP TC\n");
+    printf("---------------------------------------- ---------------- ------ ---- ---- ---- -- --\n");
+    for (i = 0; i<flavorinfo->n_flavorinfo_list; i++) {
+      flavor = flavorinfo->flavorinfo_list[i];
+      printf("%-40s %-16s %6d %4d %4d %4d %2d %2d\n", flavor->id, flavor->name, flavor->memory, flavor->disk, flavor->vcpus, flavor->pci_passthru_bw, 
+              flavor->has_huge_pages, flavor->trusted_host_only );
+    }
+
+    rwcal__yang_data__rwcal__flavorinfo__gi_unref(flavors);
+#endif
+    return 0;
+
+}
+
diff --git a/modules/core/rwvx/rwcal/test/rwcal_zk_gtest.cpp b/modules/core/rwvx/rwcal/test/rwcal_zk_gtest.cpp
new file mode 100644 (file)
index 0000000..f445694
--- /dev/null
@@ -0,0 +1,569 @@
+
+/*
+ * 
+ * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+ *
+ */
+
+
+
+/**
+ * @file rwcal_zk_gtest.cpp
+ * @author Anil Gunturu (anil.gunturu@riftio.com)
+ * @date 06/27/2014
+ * @brief Google test cases for testing rwcal
+ * 
+ * @details Google test cases for testing rwcal
+ */
+
+/* 
+ * Step 1: Include the necessary header files 
+ */
+#include <limits.h>
+#include <cstdlib>
+#include "rwut.h"
+#include "rwlib.h"
+#include "rw_vx_plugin.h"
+#include "rwtrace.h"
+#include "rwsched.h"
+#include "rwsched_object.h"
+#include "rwsched_queue.h"
+
+#include "rwcal-api.h"
+
+using ::testing::HasSubstr;
+
+struct int_status {
+  int x;
+  rw_status_t status;
+};
+
+/*
+ * Simple function for testing node data watchers.  Expects ud to
+ * be a pointer to an int which will be monotonically increased
+ * each time this callback is executed.
+ */
+#define RWCAL_RET_UD_IDX(ud, type, idx) ((type *)rwcal_get_userdata_idx(ud, idx))
+rw_status_t rwcal_data_watcher(rwcal_module_ptr_t rwcal, void *ud, int len)
+{
+  int * x = RWCAL_RET_UD_IDX(ud, int, 0);
+  (*x)++;
+  return RW_STATUS_SUCCESS;
+}
+
+/*
+ * Simple function for testing node data watchers.  Expects ud to
+ * be a pointer to a struct int_status; each invocation increments
+ * its counter and returns the status value it carries.
+ */
+rw_status_t rwcal_data_watcher_retval(rwcal_module_ptr_t rwcal, void *ud, int len)
+{
+  struct int_status * ctx = (struct int_status *)ud;
+  ctx->x++;
+  return ctx->status;
+}
+
+rw_status_t rwcal_rwzk_create_cb(rwcal_module_ptr_t rwcal, void *ud, int len)
+{
+  int idx = 0;
+  int * x = RWCAL_RET_UD_IDX(ud, int, idx);
+  (*x)++;
+  idx++;
+  while (idx < len) {
+    char *y =  RWCAL_RET_UD_IDX(ud, char, idx);
+    fprintf (stderr, "Got data %s\n", y);
+    idx ++;
+  }
+  return RW_STATUS_SUCCESS;
+}
+
+/*
+ * Create a test fixture for testing the plugin framework
+ *
+ * This fixture is responsible for:
+ *   1) allocating the Virtual Executive (VX)
+ *   2) cleanup upon test completion
+ */
+class RwCalZk : public ::testing::Test {
+ public:
+
+ protected:
+  rwcal_module_ptr_t m_mod;
+
+  virtual void TestGetNonExistentNodeData() {
+    char * data;
+    rw_status_t status;
+
+    status = rwcal_rwzk_get(m_mod, "/test_get_non_existent_node_data", &data, NULL);
+    ASSERT_EQ(RW_STATUS_NOTFOUND, status);
+    ASSERT_FALSE(data);
+  }
+
+  virtual void TestSetNonExistentNodeData() {
+    rw_status_t status;
+
+    status = rwcal_rwzk_set(m_mod, "/test_set_non_existent_node_data", "blah", NULL);
+    ASSERT_EQ(RW_STATUS_NOTFOUND, status);
+  }
+
+  virtual void TestNodeData() {
+    rw_status_t status;
+    const char * data = "aa;pofihea;coiha;cmeioeaher";
+    char * out_str;
+  
+    status = rwcal_rwzk_create(m_mod, "/test_node_data", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    /* test setting the node data */
+    status = rwcal_rwzk_set(m_mod, "/test_node_data", data, NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    /* test get */
+    status = rwcal_rwzk_get(m_mod, "/test_node_data", &out_str, NULL);
+
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    ASSERT_TRUE(out_str);
+    ASSERT_STREQ(out_str, data);
+
+    free(out_str);
+    out_str = NULL;
+
+    /* test delete */
+    status = rwcal_rwzk_delete(m_mod, "/test_node_data", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    int inc_int = 0;
+    rwcal_closure_ptr_t closure;
+
+    closure = rwcal_closure_alloc(m_mod, &rwcal_rwzk_create_cb, (void *)&inc_int);
+    ASSERT_TRUE(closure);
+
+    status = rwcal_rwzk_create(m_mod, "/async_test_node/data", closure);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    int i;
+    for (i=0; i < 100; ++i) {
+      if (inc_int)
+        break;
+      usleep(1000);
+    }
+    ASSERT_EQ(inc_int, 1);
+
+
+    inc_int = 0;
+    /* test setting the node data */
+    status = rwcal_rwzk_set(m_mod, "/async_test_node/data", data, closure);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    for (i=0; i < 100; ++i) {
+      if (inc_int)
+        break;
+      usleep(1000);
+    }
+    ASSERT_EQ(inc_int, 1);
+
+    inc_int = 0;
+    status = rwcal_rwzk_get(m_mod, "/async_test_node/data", &out_str, closure);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    for (i=0; i < 100; ++i) {
+      if (inc_int)
+        break;
+      usleep(1000);
+    }
+    ASSERT_EQ(inc_int, 1);
+    ASSERT_FALSE(out_str);
+
+
+    /* test get */
+    status = rwcal_rwzk_get(m_mod, "/async_test_node/data", &out_str, NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    ASSERT_TRUE(out_str);
+    ASSERT_STREQ(out_str, data);
+    free(out_str);
+    out_str = NULL;
+
+    char ** children;
+    status = rwcal_rwzk_get_children(m_mod, "/", &children, NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    ASSERT_TRUE(children);
+    ASSERT_STREQ(children[0], "async_test_node");
+    ASSERT_FALSE(children[1]);
+    free(children[0]);
+    free(children);
+    children = NULL;
+
+    inc_int = 0;
+    status = rwcal_rwzk_get_children(m_mod, "/", &children, closure);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    for (i=0; i < 100; ++i) {
+      if (inc_int)
+        break;
+      usleep(1000);
+    }
+    ASSERT_EQ(inc_int, 1);
+    ASSERT_FALSE(children);
+
+    status = rwcal_rwzk_create(m_mod, "/test_node_data1", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    status = rwcal_rwzk_create(m_mod, "/test_node_data2", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    status = rwcal_rwzk_create(m_mod, "/test_node_data3", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    status = rwcal_rwzk_create(m_mod, "/test_node_data4", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    status = rwcal_rwzk_create(m_mod, "/test_node_data5", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    inc_int = 0;
+    status = rwcal_rwzk_get_children(m_mod, "/", &children, closure);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    for (i=0; i < 100; ++i) {
+      if (inc_int)
+        break;
+      usleep(1000);
+    }
+    ASSERT_EQ(inc_int, 1);
+    ASSERT_FALSE(children);
+
+    status = rwcal_rwzk_delete(m_mod, "/test_node_data1", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    status = rwcal_rwzk_delete(m_mod, "/test_node_data2", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    status = rwcal_rwzk_delete(m_mod, "/test_node_data3", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    status = rwcal_rwzk_delete(m_mod, "/test_node_data4", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    status = rwcal_rwzk_delete(m_mod, "/test_node_data5", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+
+
+
+
+    inc_int = 0;
+    /* test setting the node data */
+    status = rwcal_rwzk_delete(m_mod, "/async_test_node/data", closure);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    for (i=0; i < 100; ++i) {
+      if (inc_int)
+        break;
+      usleep(1000);
+    }
+    ASSERT_EQ(inc_int, 1);
+
+    rwcal_closure_free(&closure);
+    ASSERT_FALSE(closure);
+  }
+
+  virtual void TestExists() {
+    rw_status_t status;
+    bool exists;
+
+    exists = rwcal_rwzk_exists(m_mod, "/test_exists");
+    ASSERT_FALSE(exists);
+
+    status = rwcal_rwzk_create(m_mod, "/test_exists", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    exists = rwcal_rwzk_exists(m_mod, "/test_exists");
+    ASSERT_TRUE(exists);
+
+    status = rwcal_rwzk_delete(m_mod, "/test_exists", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    exists = rwcal_rwzk_exists(m_mod, "/test_exists");
+    ASSERT_FALSE(exists);
+  }
+
+  virtual void TestCreateExistingNode() {
+    rw_status_t status;
+
+    /* create a node */
+    status = rwcal_rwzk_create(m_mod, "/test_create_existing_node", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    /* try to create the node again */
+    status = rwcal_rwzk_create(m_mod, "/test_create_existing_node", NULL);
+    ASSERT_EQ(RW_STATUS_EXISTS, status);
+
+    /*Delete all nodes*/
+    status = rwcal_rwzk_delete( m_mod, "/test_create_existing_node", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+  }
+
+  virtual void TestCreateDeleteNode() {
+    rw_status_t status;
+    char ** children;
+
+    status = rwcal_rwzk_create(m_mod, "/test_create_delete_node", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    status = rwcal_rwzk_get_children(m_mod, "/", &children, NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    ASSERT_TRUE(children);
+    ASSERT_STREQ(children[0], "test_create_delete_node");
+    ASSERT_FALSE(children[1]);
+
+    free(children[0]);
+    free(children);
+
+    status = rwcal_rwzk_delete(m_mod, "/test_create_delete_node", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    status = rwcal_rwzk_get_children(m_mod, "/", &children, NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    ASSERT_TRUE(children);
+    ASSERT_FALSE(children[0]);
+
+    free(children);
+  }
+
+  virtual void TestDeleteNonExistentNode() {
+    rw_status_t status;
+
+    /* try deleting a node that doesn't exist */
+    status = rwcal_rwzk_delete(m_mod, "/test_delete_non_existent_node", NULL);
+    ASSERT_EQ(RW_STATUS_NOTFOUND, status);
+  }
+
+  virtual void TestLock() {
+    rw_status_t status;
+    bool locked;
+    struct timeval tv = { .tv_sec = 0, .tv_usec = 1000 };
+   
+    // Test locking a nonexistent node
+    status = rwcal_rwzk_lock(m_mod, "/test_lock", NULL);
+    ASSERT_EQ(RW_STATUS_NOTFOUND, status);
+
+    status = rwcal_rwzk_unlock(m_mod, "/test_lock");
+    ASSERT_EQ(RW_STATUS_NOTFOUND, status);
+
+    locked = rwcal_rwzk_locked(m_mod, "/test_lock");
+    ASSERT_FALSE(locked);
+
+
+    status = rwcal_rwzk_create(m_mod, "/test_lock", NULL);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    // Test unlocking node that has not previously been locked
+    status = rwcal_rwzk_unlock(m_mod, "/test_lock");
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    locked = rwcal_rwzk_locked(m_mod, "/test_lock");
+    ASSERT_FALSE(locked);
+
+    // Lock the node
+    status = rwcal_rwzk_lock(m_mod, "/test_lock", &tv);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    locked = rwcal_rwzk_locked(m_mod, "/test_lock");
+    ASSERT_TRUE(locked);
+
+    // Test relocking same node
+    status = rwcal_rwzk_lock(m_mod, "/test_lock", &tv);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    // Test unlocking locked node
+    status = rwcal_rwzk_unlock(m_mod, "/test_lock");
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    locked = rwcal_rwzk_locked(m_mod, "/test_lock");
+    ASSERT_TRUE(locked);
+
+    // Test unlocking previously locked node
+    status = rwcal_rwzk_unlock(m_mod, "/test_lock");
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+
+    locked = rwcal_rwzk_locked(m_mod, "/test_lock");
+    ASSERT_FALSE(locked);
+
+  }
+
+  virtual void TestWatcher() {
+    rw_status_t status;
+    int inc_int = 0;
+    rwcal_closure_ptr_t closure;
+
+    closure = rwcal_closure_alloc(m_mod, &rwcal_data_watcher, (void *)&inc_int);
+    ASSERT_TRUE(closure);
+
+    status = rwcal_rwzk_register_watcher(m_mod, "/test_watcher", closure);
+    ASSERT_EQ(status, RW_STATUS_SUCCESS);
+    ASSERT_EQ(inc_int, 0);
+
+    status = rwcal_rwzk_create(m_mod, "/test_watcher", NULL);
+    ASSERT_EQ(status, RW_STATUS_SUCCESS);
+
+    // Watcher is executed in a separate thread, give it time to
+    // update.
+    for (int i = 0; i < 100; ++i) {
+      if (inc_int)
+        break;
+      usleep(1000);
+    }
+    ASSERT_EQ(inc_int, 1);
+
+    inc_int = 0;
+    status = rwcal_rwzk_set(m_mod, "/test_watcher", "blah", NULL);
+    ASSERT_EQ(status, RW_STATUS_SUCCESS);
+    for (int i = 0; i < 100; ++i) {
+      if (inc_int)
+        break;
+      usleep(1000);
+    }
+    ASSERT_EQ(inc_int, 1);
+
+    inc_int = 0;
+    status = rwcal_rwzk_create(m_mod, "/test_watcher2", NULL);
+    ASSERT_EQ(status, RW_STATUS_SUCCESS);
+    ASSERT_EQ(inc_int, 0);
+
+    inc_int = 0;
+    bool exists;
+    exists = rwcal_rwzk_exists(m_mod, "/test_watcher");
+    ASSERT_TRUE(exists);
+    ASSERT_EQ(inc_int, 0);
+
+    inc_int = 0;
+    status = rwcal_rwzk_unlock(m_mod, "/test_watcher");
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+    ASSERT_EQ(inc_int, 0);
+
+    status = rwcal_rwzk_delete(m_mod, "/test_watcher", NULL);
+    ASSERT_EQ(status, RW_STATUS_SUCCESS);
+    for (int i = 0; i < 100; ++i) {
+      if (inc_int)
+        break;
+      usleep(1000);
+    }
+    ASSERT_EQ(inc_int, 1);
+
+    status = rwcal_rwzk_unregister_watcher(m_mod, "/test_watcher", closure);
+    ASSERT_EQ(status, RW_STATUS_SUCCESS);
+
+    inc_int = 0;
+    status = rwcal_rwzk_create(m_mod, "/test_watcher", NULL);
+    ASSERT_EQ(status, RW_STATUS_SUCCESS);
+    for (int i = 0; i < 100; ++i) {
+      if (inc_int)
+        break;
+      usleep(1000);
+    }
+    ASSERT_EQ(inc_int, 0);
+
+    rwcal_closure_free(&closure);
+    ASSERT_FALSE(closure);
+
+    status = rwcal_rwzk_delete(m_mod, "/test_watcher", NULL);
+    ASSERT_EQ(status, RW_STATUS_SUCCESS);
+
+    status = rwcal_rwzk_delete(m_mod, "/test_watcher2", NULL);
+    ASSERT_EQ(status, RW_STATUS_SUCCESS);
+  }
+
+#if 0
+  /*
+   * It would be nice to add this test, but, because the kazoo
+   * watcher executes in a separate thread there is no way to
+   * catch any exceptions raised there.
+   *
+   * Hopefully this can be addressed as part of RIFT-2812
+   */
+  virtual void TestFailingWatcher() {
+    rw_status_t status;
+    rwcal_closure_ptr_t closure;
+    struct int_status ctx = { .x = 0, .status = RW_STATUS_FAILURE };
+
+    closure = rwcal_closure_alloc(m_mod, &rwcal_data_watcher_retval, (void *)&ctx);
+    ASSERT_TRUE(closure);
+
+    // Note that each event is currently causing two calls.  See
+    // RIFT-2812
+    status = rwcal_rwzk_register_watcher(m_mod, "/test_failing_watcher", closure);
+    ASSERT_EQ(status, RW_STATUS_SUCCESS);
+
+    status = rwcal_rwzk_create(m_mod, "/test_failing_watcher");
+    ASSERT_EQ(status, RW_STATUS_SUCCESS);
+    for (int i = 0; i < 1000; ++i) {
+      if (ctx.x > 0)
+        break;
+      usleep(1000);
+    }
+    ASSERT_GT(ctx.x, 0);
+
+    PyGILState_STATE state = PyGILState_Ensure ();
+
+    PyObject * exc = PyErr_Occurred();
+    PyGILState_Release (state);
+
+    
+  }
+#endif
+
+  virtual void InitPluginFramework() {
+    rw_status_t status;
+
+    m_mod = rwcal_module_alloc();
+    ASSERT_TRUE(m_mod);
+
+    status = rwcal_rwzk_zake_init(m_mod);
+    ASSERT_EQ(RW_STATUS_SUCCESS, status);
+  }
+
+  virtual void TearDown() {
+    rwcal_module_free(&m_mod);
+  }
+};
+
+class RwCalZakePythonPlugin : public RwCalZk {
+  virtual void SetUp() {
+    InitPluginFramework();
+  }
+};
+
+TEST_F(RwCalZakePythonPlugin, CreateDeleteNode) {
+  TestCreateDeleteNode();
+}
+
+TEST_F(RwCalZakePythonPlugin, CreateExistingNode) {
+  TestCreateExistingNode();
+}
+
+TEST_F(RwCalZakePythonPlugin, NodeData) {
+  TestNodeData();
+}
+
+TEST_F(RwCalZakePythonPlugin, Exists) {
+  TestExists();
+}
+
+TEST_F(RwCalZakePythonPlugin, GetNonExistentNodeData) {
+  TestGetNonExistentNodeData();
+}
+
+TEST_F(RwCalZakePythonPlugin, SetNonExistentNodeData) {
+  TestSetNonExistentNodeData();
+}
+
+TEST_F(RwCalZakePythonPlugin, DeleteNonExistentNode) {
+  TestDeleteNonExistentNode();
+}
+
+TEST_F(RwCalZakePythonPlugin, TestLock) {
+  TestLock();
+}
+
+TEST_F(RwCalZakePythonPlugin, TestWatcher) {
+  TestWatcher();
+}
+
+TEST_F(RwCalZakePythonPlugin, TestWatcher2) {
+  TestWatcher();
+}
+
+#if 0
+// See comments at TestFailingWatcher's implementation
+TEST_F(RwCalZakePythonPlugin, TestFailingWatcher) {
+  TestFailingWatcher();
+}
+#endif
+
diff --git a/modules/core/rwvx/rwcal/test/test_container_cal.py b/modules/core/rwvx/rwcal/test/test_container_cal.py
new file mode 100644 (file)
index 0000000..29624e2
--- /dev/null
@@ -0,0 +1,147 @@
+#!/usr/bin/env python3
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+
+import argparse
+import logging
+import os
+import sys
+import time
+
+import rw_peas
+import rwlogger
+
+from gi.repository import RwcalYang
+
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.lxc as lxc
+
+logger = logging.getLogger('rift.cal')
+
+
+def main(argv=sys.argv[1:]):
+    """
+    Assuming that an LVM backing-store has been created with a volume group
+    called 'rift', the following creates an lxc 'image' and a pair of 'vms'.
+    In the LXC based container CAL, an 'image' is container and a 'vm' is a
+    snapshot of the original container.
+
+    In addition to the LVM backing store, it is assumed that there is a network
+    bridge called 'virbr0'.
+
+    """
+    logging.basicConfig(level=logging.DEBUG)
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--rootfs', '-r')
+    parser.add_argument('--num-vms', '-n', type=int, default=2)
+    parser.add_argument('--terminate', '-t', action='store_true')
+
+    args = parser.parse_args(argv)
+
+    # Acquire the plugin from peas
+    plugin = rw_peas.PeasPlugin('rwcal-plugin', 'RwCal-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+    cal = plugin.get_interface("Cloud")
+    cal.init(rwloggerctx)
+
+    # The account object is not currently used, but it is required by the CAL
+    # interface, so we create an empty object here to represent it.
+    account = RwcalYang.CloudAccount()
+    account.account_type = "lxc"
+
+    # Make sure that any containers that were previously created have been
+    # stopped and destroyed.
+    containers = lxc.containers()
+
+    for container in containers:
+        lxc.stop(container)
+
+    for container in containers:
+        lxc.destroy(container)
+
+    template = os.path.join(
+            os.environ['RIFT_INSTALL'],
+            'etc/lxc-fedora-rift.lxctemplate',
+            )
+
+    logger.info(template)
+    logger.info(args.rootfs)
+
+    # Create an image that can be used to create VMs
+    image = RwcalYang.ImageInfoItem()
+    image.name = 'rift-master'
+    image.lxc.size = '2.5G'
+    image.lxc.template_path = template
+    image.lxc.tarfile = args.rootfs
+
+    cal.create_image(account, image)
+
+    # Create a VM
+    vms = []
+    for index in range(args.num_vms):
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = 'rift-s{}'.format(index + 1)
+        vm.image_id = image.id
+
+        cal.create_vm(account, vm)
+
+        vms.append(vm)
+
+    # Create the default and data networks
+    network = RwcalYang.NetworkInfoItem(network_name='virbr0')
+    cal.create_network(account, network)
+
+    os.system('brctl show')
+
+    # Create pairs of ports to connect the networks
+    for index, vm in enumerate(vms):
+        port = RwcalYang.PortInfoItem()
+        port.port_name = "eth0"
+        port.network_id = network.network_id
+        port.vm_id = vm.vm_id
+        port.ip_address = "192.168.122.{}".format(index + 101)
+        port.lxc.veth_name = "rws{}".format(index)
+
+        cal.create_port(account, port)
+
+    # Swap out the current instance of the plugin to test that the data is
+    # shared among different instances
+    cal = plugin.get_interface("Cloud")
+    cal.init()
+
+    # Start the VMs
+    for vm in vms:
+        cal.start_vm(account, vm.vm_id)
+
+    lxc.ls()
+
+    # Exit if the containers are not supposed to be terminated
+    if not args.terminate:
+        return
+
+    time.sleep(3)
+
+    # Stop the VMs
+    for vm in vms:
+        cal.stop_vm(account, vm.vm_id)
+
+    lxc.ls()
+
+    # Delete the VMs
+    for vm in vms:
+        cal.delete_vm(account, vm.vm_id)
+
+    # Delete the image
+    cal.delete_image(account, image.id)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/modules/core/rwvx/rwcal/test/test_openstack_install.py b/modules/core/rwvx/rwcal/test/test_openstack_install.py
new file mode 100644 (file)
index 0000000..8115364
--- /dev/null
@@ -0,0 +1,555 @@
+"""
+#
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+#
+# @file test_openstack_install.py
+# @author Varun Prasad (varun.prasad@riftio.com)
+# @date 10/10/2015
+# @brief Test Openstack/os install
+#
+"""
+
+import logging
+import re
+import socket
+import sys
+import time
+import tempfile
+
+from keystoneclient.v3 import client
+import paramiko
+import pytest
+import requests
+import xmlrpc.client
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import rw_peas
+import rwlogger
+
+
+logger = logging.getLogger()
+logging.basicConfig(level=logging.INFO)
+
+
+class Host(object):
+    """A wrapper on top of a host, which provides a ssh connection instance.
+
+    Assumption:
+    The username/password for the VM is default.
+    """
+    _USERNAME = "root"
+    _PASSWORD = "riftIO"
+
+    def __init__(self, hostname):
+        """
+        Args:
+            hostname (str): Hostname (grunt3.qanet.riftio.com)
+        """
+        self.hostname = hostname
+        try:
+            self.ip = socket.gethostbyname(hostname)
+        except socket.gaierror:
+            logger.error("Unable to resolve the hostname {}".format(hostname))
+            sys.exit(1)
+
+        self.ssh = paramiko.SSHClient()
+        # Note: Do not load the system keys as the test will fail if the keys
+        # change.
+        self.ssh.set_missing_host_key_policy(paramiko.WarningPolicy())
+
+    def connect(self):
+        """Set up ssh connection.
+        """
+        logger.debug("Trying to connect to {}: {}".format(
+                self.hostname,
+                self.ip))
+
+        self.ssh.connect(
+                self.ip,
+                username=self._USERNAME,
+                password=self._PASSWORD)
+
+    def put(self, content, dest):
+        """Creates a tempfile and puts it in the destination path in the HOST.
+        Args:
+            content (str): Content to be written to a file.
+            dest (str): Path to store the content.
+        """
+        temp_file = tempfile.NamedTemporaryFile(delete=False)
+        temp_file.write(content.encode("UTF-8"))
+        temp_file.close()
+
+        logger.info("Writing {} file in {}".format(dest, self.hostname))
+        sftp = self.ssh.open_sftp()
+        sftp.put(temp_file.name, dest)
+        sftp.close()
+
+    def clear(self):
+        """Clean up
+        """
+        self.ssh.close()
+
+
+class Grunt(Host):
+    """A wrapper on top of grunt machine, provides functionalities to check
+    if the grunt is up, IP resolution.
+    """
+    @property
+    def grunt_name(self):
+        """Extract the grunt name from the FQDN
+
+        Returns:
+            str: e.g. grunt3 from grunt3.qanet.riftio.com
+        """
+        return self.hostname.split(".")[0]
+
+    @property
+    def dns_server(self):
+        """Hard-coded for now.
+        """
+        return "10.95.0.3"
+
+    @property
+    def floating_ip(self):
+        return "10.95.1.0"
+
+    @property
+    def private_ip(self):
+        """Construct the private IP from the grunt name. 10.0.xx.0 where xx is
+        value of the grunt (3 in case of grunt3)
+        """
+        host_part = re.sub(r"[a-zA-Z]+", "", self.grunt_name)
+        return '10.0.{}.0'.format(host_part)
+
+    def is_system_up(self):
+        """Checks if system is up using ssh login.
+
+        Returns:
+            bool: Indicates if system is UP
+        """
+        try:
+            self.connect()
+        except OSError:
+            return False
+
+        return True
+
+    def wait_till_system_is_up(self, timeout=50, check_openstack=False):
+        """Blocking call to check if system is up.
+        Args:
+            timeout (int, optional): Timeout in minutes (approximate).
+            check_openstack (bool, optional): If true will also check if
+                openstack is up and running on the system.
+
+        Raises:
+            OSError: If system start exceeds the timeout
+        """
+
+        TRY_DURATION = 20  # secs
+        total_tries = timeout * (60 / TRY_DURATION)  # 3 tries/min, i.e. one try every 20 secs
+        tries = 0
+
+        while tries < total_tries:
+            if self.is_system_up():
+                if check_openstack and self.is_openstack_up():
+                        return
+                elif not check_openstack:
+                    return
+
+            logger.info("{} down: Sleeping for {} secs. Try {} of {}".format(
+                    self.hostname,
+                    TRY_DURATION,
+                    tries,
+                    int(total_tries)))
+
+            time.sleep(TRY_DURATION)
+            tries += 1
+
+        raise OSError("Exception in system start {}({})".format(
+                self.hostname,
+                self.ip))
+
+    def is_openstack_up(self):
+        """Checks if openstack is UP, by verifying the URL.
+
+        Returns:
+            bool: Indicates if system is UP
+        """
+        url = "http://{}/dashboard/".format(self.ip)
+
+        logger.info("Checking if openstack({}) is UP".format(url))
+
+        try:
+            requests.get(url)
+        except requests.ConnectionError:
+            return False
+
+        return True
+
+
+class Cobbler(Host):
+    """A thin wrapper on cobbler and provides an interface using XML rpc client.
+
+    Assumption:
+    System instances are already added to cobbler(with ipmi). Adding instances
+    can also be automated, can be taken up sometime later.
+    """
+    def __init__(self, hostname, username="cobbler", password="cobbler"):
+        """
+        Args:
+            hostname (str): Cobbler host.
+            username (str, optional): username.
+            password (str, optional): password
+        """
+        super().__init__(hostname)
+
+        url = "https://{}/cobbler_api".format(hostname)
+
+        self.server = xmlrpc.client.ServerProxy(url)
+        logger.info("obtained a cobbler instance for the host {}".format(hostname))
+
+        self.token = self.server.login(username, password)
+        self.connect()
+
+    def create_profile(self, profile_name, ks_file):
+        """Create the profile for the system.
+
+        Args:
+            profile_name (str): Name of the profile.
+            ks_file (str): Path of the kick start file.
+        """
+        profile_attrs = {
+                "name": profile_name,
+                "kickstart": ks_file,
+                "repos": ['riftware', 'rift-misc', 'fc21-x86_64-updates',
+                          'fc21-x86_64', 'openstack-kilo'],
+                "owners": ["admin"],
+                "distro": "FC21.3-x86_64"
+                }
+
+        profile_id = self.server.new_profile(self.token)
+        for key, value in profile_attrs.items():
+            self.server.modify_profile(profile_id, key, value, self.token)
+        self.server.save_profile(profile_id, self.token)
+
+    def create_snippet(self, snippet_name, snippet_content):
+        """Unfortunately the XML rpc apis don't provide a direct interface to
+        create snippets, so falling back on the default sftp methods.
+
+        Args:
+            snippet_name (str): Name.
+            snippet_content (str): snippet's content.
+
+        Returns:
+            str: path where the snippet is stored
+        """
+        path = "/var/lib/cobbler/snippets/{}".format(snippet_name)
+        self.put(snippet_content, path)
+        return path
+
+    def create_kickstart(self, ks_name, ks_content):
+        """Creates and returns the path of the ks file.
+
+        Args:
+            ks_name (str): Name of the ks file to be saved.
+            ks_content (str): Content for ks file.
+
+        Returns:
+            str: path where the ks file is saved.
+        """
+        path = "/var/lib/cobbler/kickstarts/{}".format(ks_name)
+        self.put(ks_content, path)
+        return path
+
+    def boot_system(self, grunt, profile_name, false_boot=False):
+        """Boots the system with the profile specified. Also enable net-boot
+
+        Args:
+            grunt (Grunt): instance of grunt
+            profile_name (str): A valid profile name.
+            false_boot (bool, optional): debug only option.
+        """
+        if false_boot:
+            return
+
+        system_id = self.server.get_system_handle(
+                grunt.grunt_name,
+                self.token)
+        self.server.modify_system(
+                system_id,
+                "profile",
+                profile_name,
+                self.token)
+
+        self.server.modify_system(
+                system_id,
+                "netboot_enabled",
+                "True",
+                self.token)
+        self.server.save_system(system_id, self.token)
+        self.server.power_system(system_id, "reboot", self.token)
+
+
+class OpenstackTest(object):
+    """Driver class to automate the installation.
+    """
+    def __init__(
+            self,
+            cobbler,
+            controller,
+            compute_nodes=None,
+            test_prefix="openstack_test"):
+        """
+        Args:
+            cobbler (Cobbler): Instance of Cobbler
+            controller (Controller): Controller node instance
+            compute_nodes (TYPE, optional): A list of Grunt nodes to be set up
+                    as compute nodes.
+            test_prefix (str, optional): All entities created by the script are
+                    prefixed with this string.
+        """
+        self.cobbler = cobbler
+        self.controller = controller
+        self.compute_nodes = [] if compute_nodes is None else compute_nodes
+        self.test_prefix = test_prefix
+
+    def _prepare_snippet(self):
+        """Prepares the config based on the controller and compute nodes.
+
+        Returns:
+            str: Openstack config content.
+        """
+        content = ""
+
+        config = {}
+        config['host_name'] = self.controller.grunt_name
+        config['ip'] = self.controller.ip
+        config['dns_server'] = self.controller.dns_server
+        config['private_ip'] = self.controller.private_ip
+        config['floating_ip'] = self.controller.floating_ip
+
+        content += Template.GRUNT_CONFIG.format(**config)
+        for compute_node in self.compute_nodes:
+            config["host_name"] = compute_node.grunt_name
+            content += Template.GRUNT_CONFIG.format(**config)
+
+        content = Template.SNIPPET_TEMPLATE.format(config=content)
+
+        return content
+
+    def prepare_profile(self):
+        """Creates the cobbler profile.
+        """
+        snippet_content = self._prepare_snippet()
+        self.cobbler.create_snippet(
+                "{}.cfg".format(self.test_prefix),
+                snippet_content)
+
+        ks_content = Template.KS_TEMPLATE
+        ks_file = self.cobbler.create_kickstart(
+                "{}.ks".format(self.test_prefix),
+                ks_content)
+
+        self.cobbler.create_profile(self.test_prefix, ks_file)
+        return self.test_prefix
+
+    def _get_cal_account(self):
+        """
+        Creates an object for class RwcalYang.CloudAccount()
+        """
+        account                        = RwcalYang.CloudAccount()
+        account.account_type           = "openstack"
+        account.openstack.key          = "{}_user".format(self.test_prefix)
+        account.openstack.secret       = "mypasswd"
+        account.openstack.auth_url     = 'http://{}:35357/v3/'.format(self.controller.ip)
+        account.openstack.tenant       = self.test_prefix
+
+        return account
+
+    def start(self):
+        """Starts the installation.
+        """
+        profile_name = self.prepare_profile()
+
+        self.cobbler.boot_system(self.controller, profile_name)
+        self.controller.wait_till_system_is_up(check_openstack=True)
+
+        try:
+            logger.info("Controller system is UP. Setting up compute nodes")
+            for compute_node in self.compute_nodes:
+                self.cobbler.boot_system(compute_node, profile_name)
+                compute_node.wait_till_system_is_up()
+        except OSError as e:
+            logger.error("System set-up failed {}".format(e))
+            sys.exit(1)
+
+        # Currently we don't have a wrapper on top of users/projects, so we use
+        # the keystone API directly
+        acct = self._get_cal_account()
+
+        keystone_conn = client.Client(
+                auth_url=acct.openstack.auth_url,
+                username='admin',
+                password='mypasswd')
+
+        # Create a test project
+        project = keystone_conn.projects.create(
+                acct.openstack.tenant,
+                "default",
+                description="Openstack test project")
+
+        # Create a user
+        user = keystone_conn.users.create(
+                acct.openstack.key,
+                password=acct.openstack.secret,
+                default_project=project)
+
+        # Make the newly created user an admin
+        admin_role = keystone_conn.roles.list(name="admin")[0]
+        keystone_conn.roles.grant(
+                admin_role.id,
+                user=user.id,
+                project=project.id)
+
+        # nova API needs to be restarted, otherwise the new service doesn't play
+        # well
+        self.controller.ssh.exec_command("source keystonerc_admin && "
+                "service openstack-nova-api restart")
+        time.sleep(10)
+
+        return acct
+
+    def clear(self):
+        """Close out all SFTP connections.
+        """
+        nodes = [self.controller]
+        nodes.extend(self.compute_nodes)
+        for node in nodes:
+            node.clear()
+
+
+###############################################################################
+## Begin pytests
+###############################################################################
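+# The fixtures and tests below are intended to be run via pytest, e.g.
+# (illustrative invocation):
+#   py.test -v test_openstack_install.py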
+
+
+@pytest.fixture(scope="session")
+def cal(request):
+    """
+    Loads rw.cal plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+    cal = plugin.get_interface("Cloud")
+    try:
+        rc = cal.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except:
+        logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("Openstack Cal plugin successfully instantiated")
+
+    return cal
+
+
+@pytest.fixture(scope="session")
+def account(request):
+    """Creates an openstack instance with 1 compute node and returns the newly
+    created account.
+    """
+    cobbler = Cobbler("qacobbler.eng.riftio.com")
+    controller = Grunt("grunt3.qanet.riftio.com")
+    compute_nodes = [Grunt("grunt5.qanet.riftio.com")]
+
+    test = OpenstackTest(cobbler, controller, compute_nodes)
+    account = test.start()
+
+    request.addfinalizer(test.clear)
+    return account
+
+
+def test_list_images(cal, account):
+    """Verify if 2 images are present
+    """
+    status, resources = cal.get_image_list(account)
+    assert len(resources.imageinfo_list) == 2
+
+def test_list_flavors(cal, account):
+    """Basic flavor checks
+    """
+    status, resources = cal.get_flavor_list(account)
+    assert len(resources.flavorinfo_list) == 5
+
+
+class Template(object):
+    """A container to hold all cobbler related templates.
+    """
+    GRUNT_CONFIG = """
+{host_name})
+    CONTROLLER={ip}
+    BRGIF=1
+    OVSDPDK=N
+    TRUSTED=N
+    QAT=N
+    HUGEPAGE=0
+    VLAN=10:14
+    PRIVATE_IP={private_ip}
+    FLOATING_IP={floating_ip}
+    DNS_SERVER={dns_server}
+    ;;
+
+    """
+
+    SNIPPET_TEMPLATE = """
+# =====================Beginning of snippet=================
+# snippet openstack_test.cfg
+case $name in
+
+{config}
+
+*)
+    ;;
+esac
+
+# =====================End of snippet=================
+
+"""
+
+    KS_TEMPLATE = """
+$SNIPPET('rift-repos')
+$SNIPPET('rift-base')
+%packages
+@core
+wget
+$SNIPPET('rift-grunt-fc21-packages')
+ganglia-gmetad
+ganglia-gmond
+%end
+
+%pre
+$SNIPPET('log_ks_pre')
+$SNIPPET('kickstart_start')
+# Enable installation monitoring
+$SNIPPET('pre_anamon')
+%end
+
+%post --log=/root/ks_post.log
+$SNIPPET('openstack_test.cfg')
+$SNIPPET('ganglia')
+$SNIPPET('rift-post-yum')
+$SNIPPET('rift-post')
+$SNIPPET('rift_fix_grub')
+
+$SNIPPET('rdo-post')
+echo "banner RDO test" >> /etc/profile
+
+$SNIPPET('kickstart_done')
+%end
+"""
diff --git a/modules/core/rwvx/rwcal/test/test_rwcal_openstack.py b/modules/core/rwvx/rwcal/test/test_rwcal_openstack.py
new file mode 100644 (file)
index 0000000..a091d5f
--- /dev/null
@@ -0,0 +1,1038 @@
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+import datetime
+import logging
+import time
+import unittest
+
+import novaclient.exceptions as nova_exception
+import paramiko
+import rw_peas
+import rwlogger
+from keystoneclient import v3 as ksclient
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+from rift.rwcal.openstack.openstack_drv import KeystoneDriver, NovaDriver
+
+logger = logging.getLogger('rwcal-openstack')
+
+#
+# Important information about the openstack installation. This needs to be manually verified.
+#
+openstack_info = {
+    'username'           : 'pluto',
+    'password'           : 'mypasswd',
+    'auth_url'           : 'http://10.66.4.14:5000/v3/',
+    'project_name'       : 'demo',
+    'mgmt_network'       : 'private',
+    'reserved_flavor'    : 'm1.medium',
+    'reserved_image'     : 'rift-root-latest.qcow2',
+    'physical_network'   : None,
+    'network_type'       : None,
+    'segmentation_id'    : None
+    }
+
+
+def get_cal_account():
+    """
+    Creates an object for class RwcalYang.CloudAccount()
+    """
+    account                        = RwcalYang.CloudAccount()
+    account.account_type           = "openstack"
+    account.openstack.key          = openstack_info['username']
+    account.openstack.secret       = openstack_info['password']
+    account.openstack.auth_url     = openstack_info['auth_url']
+    account.openstack.tenant       = openstack_info['project_name']
+    account.openstack.mgmt_network = openstack_info['mgmt_network']
+    return account
+
+def get_cal_plugin():
+    """
+    Loads rw.cal plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+    cal = plugin.get_interface("Cloud")
+    try:
+        rc = cal.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except:
+        logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("Openstack Cal plugin successfully instantiated")
+    return cal 
+
+
+class OpenStackTest(unittest.TestCase):
+    IMG_Checksum = "12312313123131313131" # Some random number to test image tagging
+    NodeID = "123456789012345" # Some random number to test VM tagging
+    MemoryPageSize = "LARGE"
+    CpuPolicy = "DEDICATED"
+    CpuThreadPolicy = "SEPARATE"
+    CpuThreads = 1
+    NumaNodeCount = 2
+    HostTrust = "trusted"
+    PCIPassThroughAlias = "PCI_10G_ALIAS"
+    SEG_ID = openstack_info['segmentation_id']
+    
+    def setUp(self):
+        """
+        Assumption:
+         - It is assumed that the openstack install has a flavor and image pre-created.
+         - Flavor name: the one configured as openstack_info['reserved_flavor'] (e.g. m1.medium)
+         - Image name : the one configured as openstack_info['reserved_image'] (e.g. rift-root-latest.qcow2)
+
+        If these resources are not present, this test will fail.
+        """
+        self._acct = get_cal_account()
+        logger.info("Openstack-CAL-Test: setUp")
+        self.cal   = get_cal_plugin()
+        logger.info("Openstack-CAL-Test: setUpEND")
+        
+        # First check for VM Flavor and Image and get the corresponding IDs
+        rc, rs = self.cal.get_flavor_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        flavor_list = [ flavor for flavor in rs.flavorinfo_list if flavor.name == openstack_info['reserved_flavor'] ]
+        self.assertNotEqual(len(flavor_list), 0)
+        self._flavor = flavor_list[0]
+
+        rc, rs = self.cal.get_image_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        image_list = [ image for image in rs.imageinfo_list if image.name == openstack_info['reserved_image'] ]
+        self.assertNotEqual(len(image_list), 0)
+        self._image = image_list[0]
+
+        rc, rs = self.cal.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        networks = [ network for network in rs.networkinfo_list if (network.network_name == 'rift.cal.unittest.network' or network.network_name == 'rift.cal.virtual_link') ]
+        for network in networks:
+            self.cal.delete_virtual_link(self._acct, network.network_id)
+            
+    def tearDown(self):
+        logger.info("Openstack-CAL-Test: tearDown")
+        
+
+    @unittest.skip("Skipping test_list_flavors")        
+    def test_list_flavor(self):
+        """
+        List existing flavors from openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting List Flavors Test")
+        rc, rsp = self.cal.get_flavor_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d flavors" %(len(rsp.flavorinfo_list)))
+        for flavor in rsp.flavorinfo_list:
+            rc, flv = self.cal.get_flavor(self._acct, flavor.id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            self.assertEqual(flavor.id, flv.id)
+        
+    @unittest.skip("Skipping test_list_images")                    
+    def test_list_images(self):
+        """
+        List existing images from openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting List Images Test")
+        rc, rsp = self.cal.get_image_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d images" %(len(rsp.imageinfo_list)))
+        #for image in rsp.imageinfo_list:
+        #    rc, img = self.cal.get_image(self._acct, image.id)
+        #    self.assertEqual(rc, RwStatus.SUCCESS)
+        #    self.assertEqual(image.id, img.id)
+        
+    @unittest.skip("Skipping test_list_vms")                
+    def test_list_vms(self):
+        """
+        List existing VMs from openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting List VMs Test")
+        rc, rsp = self.cal.get_vm_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d VMs" %(len(rsp.vminfo_list)))
+        for vm in rsp.vminfo_list:
+            rc, server = self.cal.get_vm(self._acct, vm.vm_id)
+            self.assertEqual(vm.vm_id, server.vm_id)
+            
+    @unittest.skip("Skipping test_list_networks")                            
+    def test_list_networks(self):
+        """
+        List existing Network from openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting List Networks Test")
+        rc, rsp = self.cal.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d Networks" %(len(rsp.networkinfo_list)))
+        for network in rsp.networkinfo_list:
+            rc, net = self.cal.get_network(self._acct, network.network_id)
+            self.assertEqual(network.network_id, net.network_id)
+        
+    @unittest.skip("Skipping test_list_ports")                                    
+    def test_list_ports(self):
+        """
+        List existing Ports from openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting List Ports Test")
+        rc, rsp = self.cal.get_port_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d Ports" %(len(rsp.portinfo_list)))
+        for port in rsp.portinfo_list:
+            rc, p = self.cal.get_port(self._acct, port.port_id)
+            self.assertEqual(port.port_id, p.port_id)
+
+    def _get_image_info_request(self):
+        """
+        Returns request object of type RwcalYang.ImageInfoItem()
+        """
+        img = RwcalYang.ImageInfoItem()
+        img.name = "rift.cal.unittest.image"
+        img.location = '/net/sharedfiles/home1/common/vm/rift-root-latest.qcow2'
+        img.disk_format = "qcow2"
+        img.container_format = "bare"
+        img.checksum = OpenStackTest.IMG_Checksum
+        return img
+
+    def _get_image_info(self, img_id):
+        """
+        Checks the image status until it becomes active or a timeout occurs (~200 sec)
+        Returns the image_info dictionary
+        """
+        rs = None
+        rc = None
+        for i in range(100):
+            rc, rs = self.cal.get_image(self._acct, img_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: Image (image_id: %s) reached state : %s" %(img_id, rs.state))
+            if rs.state == 'active':
+                break
+            else:
+                time.sleep(2) # Poll every 2 seconds
+        return rs
+    
+    @unittest.skip("Skipping test_create_delete_image")                            
+    def test_create_delete_image(self):
+        """
+        Create/Query/Delete a new image in openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting Image create test")
+        img = self._get_image_info_request()
+        rc, img_id = self.cal.create_image(self._acct, img)
+        logger.info("Openstack-CAL-Test: Created Image with image_id: %s" %(img_id))
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        img_info = self._get_image_info(img_id)
+        self.assertNotEqual(img_info, None)
+        self.assertEqual(img_id, img_info.id)
+        logger.info("Openstack-CAL-Test: Image (image_id: %s) reached state : %s" %(img_id, img_info.state))
+        self.assertEqual(img_info.has_field('checksum'), True)
+        self.assertEqual(img_info.checksum, OpenStackTest.IMG_Checksum)
+        logger.info("Openstack-CAL-Test: Initiating Delete Image operation for image_id: %s" %(img_id))
+        rc = self.cal.delete_image(self._acct, img_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Image (image_id: %s) successfully deleted" %(img_id))
+
+    def _get_flavor_info_request(self):
+        """
+        Returns request object of type RwcalYang.FlavorInfoItem()
+        """
+        flavor                                     = RwcalYang.FlavorInfoItem()
+        flavor.name                                = 'rift.cal.unittest.flavor'
+        flavor.vm_flavor.memory_mb                 = 16384 # 16GB
+        flavor.vm_flavor.vcpu_count                = 4 
+        flavor.vm_flavor.storage_gb                = 40 # 40GB
+        flavor.guest_epa.mempage_size              = OpenStackTest.MemoryPageSize
+        flavor.guest_epa.cpu_pinning_policy        = OpenStackTest.CpuPolicy
+        flavor.guest_epa.cpu_thread_pinning_policy = OpenStackTest.CpuThreadPolicy
+        flavor.guest_epa.numa_node_policy.node_cnt = OpenStackTest.NumaNodeCount
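+        # Split the flavor's 4 vCPUs and 16GB of memory evenly across the two NUMA nodes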
+        for i in range(OpenStackTest.NumaNodeCount):
+            node = flavor.guest_epa.numa_node_policy.node.add()
+            node.id = i
+            if i == 0:
+                node.vcpu = [0,1]
+            elif i == 1:
+                node.vcpu = [2,3]
+            node.memory_mb = 8192
+        dev = flavor.guest_epa.pcie_device.add()
+        dev.device_id = OpenStackTest.PCIPassThroughAlias
+        dev.count = 1
+        return flavor
+        
+    @unittest.skip("Skipping test_create_delete_flavor")                            
+    def test_create_delete_flavor(self):
+        """
+        Create/Query/Delete a new flavor in openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting Image create/delete test")
+
+        ### Delete any previously created flavor with name rift.cal.unittest.flavor
+        rc, rs = self.cal.get_flavor_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        flavor_list = [ flavor for flavor in rs.flavorinfo_list if flavor.name == 'rift.cal.unittest.flavor' ]
+        if flavor_list:
+            rc = self.cal.delete_flavor(self._acct, flavor_list[0].id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+        
+        flavor = self._get_flavor_info_request()
+        rc, flavor_id = self.cal.create_flavor(self._acct, flavor)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        
+        logger.info("Openstack-CAL-Test: Created new flavor with flavor_id : %s" %(flavor_id))
+        rc, rs = self.cal.get_flavor(self._acct, flavor_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.id, flavor_id)
+
+        # Verify EPA Attributes
+        self.assertEqual(rs.guest_epa.mempage_size, OpenStackTest.MemoryPageSize)
+        self.assertEqual(rs.guest_epa.cpu_pinning_policy, OpenStackTest.CpuPolicy)
+        self.assertEqual(rs.guest_epa.cpu_thread_pinning_policy, OpenStackTest.CpuThreadPolicy)
+        self.assertEqual(rs.guest_epa.numa_node_policy.node_cnt, OpenStackTest.NumaNodeCount)
+        self.assertEqual(len(rs.guest_epa.pcie_device), 1)
+        self.assertEqual(rs.guest_epa.pcie_device[0].device_id, OpenStackTest.PCIPassThroughAlias)
+        self.assertEqual(rs.guest_epa.pcie_device[0].count, 1)
+        logger.info("Openstack-CAL-Test: Initiating delete for flavor_id : %s" %(flavor_id))
+        rc = self.cal.delete_flavor(self._acct, flavor_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        # Check that flavor does not exist anymore in list_flavor
+        rc, rs = self.cal.get_flavor_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        flavor_list = [ flavor for flavor in rs.flavorinfo_list if flavor.id == flavor_id ]
+        # Flavor List should be empty
+        self.assertEqual(len(flavor_list), 0)
+        logger.info("Openstack-CAL-Test: Flavor (flavor_id: %s) successfully deleted" %(flavor_id))
+
+    def _get_vm_info_request(self, flavor_id, image_id):
+        """
+        Returns request object of type RwcalYang.VMInfoItem
+        """
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = 'rift.cal.unittest.vm'
+        vm.flavor_id = flavor_id
+        vm.image_id  = image_id
+        vm.cloud_init.userdata = ''
+        vm.user_tags.node_id  = OpenStackTest.NodeID
+        return vm
+
+    def _check_vm_state(self, vm_id, expected_state):
+        """
+        Wait until the VM reaches the given state (expected_state).
+        """
+        # Wait while VM goes to required state
+
+        for i in range(50): # 50 poll iterations...
+            rc, rs = self.cal.get_vm(self._acct, vm_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+            if rs.state == expected_state:
+                break
+            else:
+                time.sleep(1)
+
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.state, expected_state)
+
+    def _create_vm(self, flavor, image, port_list = None):
+        """
+        Create VM and perform validity checks
+        """
+        logger.info("Openstack-CAL-Test: Using image : %s and flavor : %s " %(image.name, flavor.name))
+        vm = self._get_vm_info_request(flavor.id, image.id)
+
+        if port_list:
+            for port_id in port_list:
+                port = vm.port_list.add()
+                port.port_id = port_id 
+
+        rc, vm_id = self.cal.create_vm(self._acct, vm)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Check if VM creation is successful
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Successfully created VM with vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+
+        ### Ensure the VM state is active
+        self._check_vm_state(vm_id, 'ACTIVE')
+
+        ### Ensure that userdata tags are set as expected
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.user_tags.has_field('node_id'), True)
+        self.assertEqual(getattr(rs.user_tags, 'node_id'), OpenStackTest.NodeID)
+        logger.info("Openstack-CAL-Test: Successfully verified the user tags for VM-ID: %s" %(vm_id))
+        return rs, vm_id
+
+    def _delete_vm(self, vm_id):
+        """
+        Delete VM and perform validity checks
+        """
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        logger.info("Openstack-CAL-Test: Initiating VM Delete operation on VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+
+        rc = self.cal.delete_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        for i in range(50):
+            # Check if VM still exists
+            rc, rs = self.cal.get_vm_list(self._acct)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            vm_list = [vm for vm in rs.vminfo_list if vm.vm_id == vm_id]
+            if not len(vm_list):
+                break
+            time.sleep(1)
+        
+        rc, rs = self.cal.get_vm_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        vm_list = [vm for vm in rs.vminfo_list if vm.vm_id == vm_id]
+        self.assertEqual(len(vm_list), 0)
+        logger.info("Openstack-CAL-Test: VM with vm_id : %s successfully deleted" %(vm_id))
+
+    def _stop_vm(self, vm_id):
+        """
+        Stop VM and perform validity checks
+        """
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Initiating Stop VM operation on VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+        rc = self.cal.stop_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        ### Ensure that VM state is SHUTOFF
+        self._check_vm_state(vm_id, 'SHUTOFF')
+        
+        
+    def _start_vm(self, vm_id):
+        """
+        Starts VM and performs validity checks
+        """
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Initiating Start VM operation on VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+        rc = self.cal.start_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Ensure that VM state is ACTIVE
+        self._check_vm_state(vm_id, 'ACTIVE')
+
+        
+    def _reboot_vm(self, vm_id):
+        """
+        Reboot VM and perform validity checks
+        """
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Initiating Reboot VM operation on VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+        rc = self.cal.reboot_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Ensure that VM state is ACTIVE
+        self._check_vm_state(vm_id, 'ACTIVE')
+
+    def assert_vm(self, vm_data, flavor):
+        """Verify the newly created VM for attributes specified in the flavor.
+
+        Args:
+            vm_data (VmData): Instance of the newly created VM
+            flavor (FlavorInfoItem): Config flavor.
+        """
+        vm_config = flavor
+
+        # Page size seems to be 4096, regardless of the page size name.
+        page_lookup = {"large": '4096', "small": '4096'}
+        FIELDS = ["vcpus", "cpu_threads", "memory_page_size", "disk",
+                  "numa_node_count", "memory", "pci_passthrough_device_list"]
+
+        for field in FIELDS:
+            if field not in vm_config:
+                continue
+
+            vm_value = getattr(vm_data, field)
+            config_value = getattr(vm_config, field)
+
+            if field == "memory_page_size":
+                config_value = page_lookup[config_value]
+
+            if field == "memory":
+                config_value = int(config_value/1000)
+
+            if field == "pci_passthrough_device_list":
+                config_value = len(config_value)
+                vm_value = len(vm_value)
+
+            self.assertEqual(vm_value, config_value)
+
+    @unittest.skip("Skipping test_vm_epa_attributes")
+    def test_vm_epa_attributes(self):
+        """
+        Primary goal: To create a VM with the specified EPA Attributes
+        Secondary goal: To verify flavor creation/delete
+        """
+
+        logger.info("Openstack-CAL-Test: Starting VM(EPA) create/delete test")
+        flavor = self._get_flavor_info_request()
+   
+        rc, flavor_id = self.cal.do_create_flavor(self._acct, flavor)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        flavor.id = flavor_id
+
+        data, vm_id = self._create_vm(flavor, self._image)
+
+        vm_data = VmData(data.host_name, data.management_ip)
+        self.assert_vm(vm_data, flavor)
+
+        self._delete_vm(vm_id)
+
+        rc = self.cal.do_delete_flavor(self._acct, flavor_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+    @unittest.skip("Skipping test_expiry_token")
+    def test_expiry_token(self):
+        """
+        Primary goal: To verify if we are refreshing the expired tokens.
+        """
+        logger.info("Openstack-CAL-Test: Starting token refresh test")
+        drv = KeystoneDriver(
+                openstack_info['username'],
+                openstack_info['password'],
+                openstack_info['auth_url'],
+                openstack_info['project_name'])
+        # Get hold of the client instance needed for the Token Manager
+        client = drv._get_keystone_connection()
+
+        auth_ref = client.auth_ref
+        token = auth_ref['auth_token']
+
+        # Verify if the newly acquired token works.
+        nova = NovaDriver(drv)
+        flavors = nova.flavor_list()
+        self.assertTrue(len(flavors) > 1)
+
+        # Invalidate the token
+        token_manger = ksclient.tokens.TokenManager(client)
+        token_manger.revoke_token(token)
+
+        time.sleep(10)
+
+        unauth_exp = False
+        try:
+            flavors = nova.flavor_list()
+            print (flavors)
+        except nova_exception.AuthorizationFailure:
+            unauth_exp = True
+
+        self.assertTrue(unauth_exp)
+
+        # Explicitly reset the expire time, to test if we acquire a new token
+        now = datetime.datetime.utcnow()
+        time_str = format(now, "%Y-%m-%dT%H:%M:%S.%fZ")
+        drv._get_keystone_connection().auth_ref['expires_at'] = time_str
+
+        flavors = nova.flavor_list()
+        self.assertTrue(len(flavors) > 1)
+
+    @unittest.skip("Skipping test_vm_operations")                            
+    def test_vm_operations(self):
+        """
+        Primary goal: Create/Query/Delete VM in openstack installation.
+        Secondary goal: VM pause/resume operations on VM.
+
+        """
+        logger.info("Openstack-CAL-Test: Starting VM Operations test")
+
+        # Create VM
+        data, vm_id = self._create_vm(self._flavor, self._image)
+
+        # Stop VM
+        self._stop_vm(vm_id)
+        # Start VM
+        self._start_vm(vm_id)
+
+        vm_data = VmData(data.host_name, data.management_ip)
+        self.assert_vm(vm_data, self._flavor)
+
+        # Reboot VM
+        self._reboot_vm(vm_id)
+        ### Delete the VM
+        self._delete_vm(vm_id)
+
+        
+    def _get_network_info_request(self):
+        """
+        Returns request object of type RwcalYang.NetworkInfoItem
+        """
+        network                            = RwcalYang.NetworkInfoItem()
+        network.network_name               = 'rift.cal.unittest.network'
+        network.subnet                     = '192.168.16.0/24'
+        if openstack_info['physical_network']:
+            network.provider_network.physical_network = openstack_info['physical_network']
+        if openstack_info['network_type']:
+            network.provider_network.overlay_type     = openstack_info['network_type']
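+        # Bump SEG_ID after use so each created network gets a unique segmentation id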
+        if OpenStackTest.SEG_ID:
+            network.provider_network.segmentation_id  = OpenStackTest.SEG_ID
+            OpenStackTest.SEG_ID += 1
+        return network
+
+
+    def _create_network(self):
+        """
+        Create a network and verify that network creation is successful
+        """
+        network = self._get_network_info_request()
+
+        ### Create network
+        logger.info("Openstack-CAL-Test: Creating a network with name : %s" %(network.network_name))
+        rc, net_id = self.cal.create_network(self._acct, network)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Verify network is created successfully
+        rc, rs = self.cal.get_network(self._acct, net_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Successfully create Network : %s  with id : %s." %(network.network_name, net_id ))
+
+        return net_id
+
+    def _delete_network(self, net_id):
+        """
+        Delete network and verify that delete operation is successful
+        """
+        rc, rs = self.cal.get_network(self._acct, net_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        logger.info("Openstack-CAL-Test: Deleting a network with id : %s. " %(net_id))
+        rc = self.cal.delete_network(self._acct, net_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        
+        # Verify that network is no longer available via get_network_list API
+        rc, rs = self.cal.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        network_info = [ network for network in rs.networkinfo_list if network.network_id == net_id ]
+        self.assertEqual(len(network_info), 0)
+        logger.info("Openstack-CAL-Test: Successfully deleted Network with id : %s" %(net_id))
+        
+        
+    @unittest.skip("Skipping test_network_operations")                            
+    def test_network_operations(self):
+        """
+        Create/Delete Networks
+        """
+        logger.info("Openstack-CAL-Test: Starting Network Operation test")
+
+        ### Create Network
+        net_id = self._create_network()
+
+        ### Delete Network
+        self._delete_network(net_id)
+
+    def _get_port_info_request(self, network_id, vm_id):
+        """
+        Returns an object of type RwcalYang.PortInfoItem
+        """
+        port = RwcalYang.PortInfoItem()
+        port.port_name = 'rift.cal.unittest.port'
+        port.network_id = network_id
+        if vm_id != None:
+            port.vm_id = vm_id
+        return port
+
+    def _create_port(self, net_id, vm_id = None):
+        """
+        Create a port in network with network_id: net_id and verifies that operation is successful
+        """
+        if vm_id != None:
+            logger.info("Openstack-CAL-Test: Creating a port in network with network_id: %s and VM with vm_id: %s" %(net_id, vm_id))
+        else:
+            logger.info("Openstack-CAL-Test: Creating a port in network with network_id: %s" %(net_id))
+
+        ### Create Port
+        port = self._get_port_info_request(net_id, vm_id)
+        rc, port_id = self.cal.create_port(self._acct, port)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Get Port
+        rc, rs = self.cal.get_port(self._acct, port_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Successfully create Port with id : %s. Port State :  %s" %(port_id, rs.port_state))
+
+        return port_id
+
+    def _delete_port(self, port_id):
+        """
+        Deletes a port and verifies that operation is successful
+        """
+        rc, rs = self.cal.get_port(self._acct, port_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Deleting Port with id : %s. Port State :  %s" %(port_id, rs.port_state))
+
+        ### Delete Port
+        self.cal.delete_port(self._acct, port_id)
+        
+        rc, rs = self.cal.get_port_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        port_list = [ port for port in rs.portinfo_list if port.port_id == port_id ]
+        self.assertEqual(len(port_list), 0)
+        logger.info("Openstack-CAL-Test: Successfully Deleted Port with id : %s" %(port_id))
+
+    def _monitor_port(self, port_id, expected_state):
+        """
+        Monitor the port state until it reaches expected_state
+        """
+        for i in range(50):
+            rc, rs = self.cal.get_port(self._acct, port_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: Port with id : %s. Port State :  %s" %(port_id, rs.port_state))
+            if rs.port_state == expected_state:
+                break
+            time.sleep(1)
+        rc, rs = self.cal.get_port(self._acct, port_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.port_state, expected_state)
+        logger.info("Openstack-CAL-Test: Port with port_id : %s reached expected state  : %s" %(port_id, rs.port_state))
+            
+    @unittest.skip("Skipping test_port_operations_with_vm")
+    def test_port_operations_with_vm(self):
+        """
+        Create/Delete Ports in a network and associate it with a VM
+        """
+        logger.info("Openstack-CAL-Test: Starting Port Operation test with VM")
+
+        ### First create a network
+        net_id = self._create_network()
+
+        ### Create a VM
+        data, vm_id = self._create_vm(self._flavor, self._image)
+
+        ### Now create Port which connects VM to Network
+        port_id = self._create_port(net_id, vm_id)
+
+        ### Verify that port goes to active state
+        self._monitor_port(port_id, 'ACTIVE')
+
+        ### Delete VM
+        self._delete_vm(vm_id)
+        
+        ### Delete Port
+        self._delete_port(port_id)
+
+        ### Delete the network
+        self._delete_network(net_id)
+
+    @unittest.skip("Skipping test_create_vm_with_port")
+    def test_create_vm_with_port(self):
+        """
+        Create VM and add ports to it during boot time.
+        """
+        logger.info("Openstack-CAL-Test: Starting Create VM with port test")
+
+        ### First create a network
+        net_id = self._create_network()
+
+        ### Now create Port which connects VM to Network
+        port_id = self._create_port(net_id)
+
+        ### Create a VM
+        data, vm_id = self._create_vm(self._flavor, self._image, [port_id])
+
+        ### Verify that port goes to active state
+        self._monitor_port(port_id, 'ACTIVE')
+
+        ### Delete VM
+        self._delete_vm(vm_id)
+        
+        ### Delete Port
+        self._delete_port(port_id)
+
+        ### Delete the network
+        self._delete_network(net_id)
+
+    @unittest.skip("Skipping test_get_vdu_list")
+    def test_get_vdu_list(self):
+        """
+        Test the get_vdu_list API
+        """
+        logger.info("Openstack-CAL-Test: Test Get VDU List APIs")
+        rc, rsp = self.cal.get_vdu_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d VDUs" %(len(rsp.vdu_info_list)))
+        for vdu in rsp.vdu_info_list:
+            rc, vdu2 = self.cal.get_vdu(self._acct, vdu.vdu_id)
+            self.assertEqual(vdu2.vdu_id, vdu.vdu_id)
+
+
+    @unittest.skip("Skipping test_get_virtual_link_list")
+    def test_get_virtual_link_list(self):
+        """
+        Test the get_virtual_link_list API
+        """
+        logger.info("Openstack-CAL-Test: Test Get virtual_link List APIs")
+        rc, rsp = self.cal.get_virtual_link_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d virtual_links" %(len(rsp.virtual_link_info_list)))
+        for virtual_link in rsp.virtual_link_info_list:
+            rc, virtual_link2 = self.cal.get_virtual_link(self._acct, virtual_link.virtual_link_id)
+            self.assertEqual(virtual_link2.virtual_link_id, virtual_link.virtual_link_id)
+
+    def _get_virtual_link_request_info(self):
+        """
+        Returns object of type RwcalYang.VirtualLinkReqParams
+        """
+        vlink = RwcalYang.VirtualLinkReqParams()
+        vlink.name = 'rift.cal.virtual_link'
+        vlink.subnet = '192.168.1.0/24'
+        if openstack_info['physical_network']:
+            vlink.provider_network.physical_network = openstack_info['physical_network']
+        if openstack_info['network_type']:
+            vlink.provider_network.overlay_type     = openstack_info['network_type'].upper()
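+        # As above, bump SEG_ID so each virtual link gets a unique segmentation id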
+        if OpenStackTest.SEG_ID:
+            vlink.provider_network.segmentation_id  = OpenStackTest.SEG_ID
+            OpenStackTest.SEG_ID += 1
+        return vlink
+        
+    def _get_vdu_request_info(self, virtual_link_id):
+        """
+        Returns object of type RwcalYang.VDUInitParams
+        """
+        vdu = RwcalYang.VDUInitParams()
+        vdu.name = "cal.vdu"
+        vdu.node_id = OpenStackTest.NodeID
+        vdu.image_id = self._image.id
+        vdu.flavor_id = self._flavor.id
+        vdu.vdu_init.userdata = ''
+        vdu.allocate_public_address = True
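+        # Attach a single VIRTIO connection point on the supplied virtual link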
+        c1 = vdu.connection_points.add()
+        c1.name = "c_point1"
+        c1.virtual_link_id = virtual_link_id
+        c1.type_yang = 'VIRTIO'
+        return vdu
+
+    def _get_vdu_modify_request_info(self, vdu_id, virtual_link_id):
+        """
+        Returns object of type RwcalYang.VDUModifyParams
+        """
+        vdu = RwcalYang.VDUModifyParams()
+        vdu.vdu_id = vdu_id
+        c1 = vdu.connection_points_add.add()
+        c1.name = "c_modify1"
+        c1.virtual_link_id = virtual_link_id
+       
+        return vdu 
+        
+    #@unittest.skip("Skipping test_create_delete_virtual_link_and_vdu")
+    def test_create_delete_virtual_link_and_vdu(self):
+        """
+        Test to create VDU
+        """
+        logger.info("Openstack-CAL-Test: Test Create Virtual Link API")
+        vlink_req = self._get_virtual_link_request_info()
+
+        rc, rsp = self.cal.create_virtual_link(self._acct, vlink_req)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
+        vlink_id = rsp
+        
+        #Check if virtual_link create is successful
+        rc, rsp = self.cal.get_virtual_link(self._acct, rsp)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rsp.virtual_link_id, vlink_id)
+
+        # Now create VDU
+        vdu_req = self._get_vdu_request_info(vlink_id)
+        logger.info("Openstack-CAL-Test: Test Create VDU API")
+
+        rc, rsp = self.cal.create_vdu(self._acct, vdu_req)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Created vdu with Id: %s" %rsp)
+
+        vdu_id = rsp
+
+        ## Check if VDU create is successful
+        rc, rsp = self.cal.get_vdu(self._acct, rsp)
+        self.assertEqual(rsp.vdu_id, vdu_id)
+
+        ### Wait until vdu_state is active
+        for i in range(50):
+            rc, rs = self.cal.get_vdu(self._acct, vdu_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: VDU with id : %s. Reached State :  %s" %(vdu_id, rs.state))
+            if rs.state == 'active':
+                break
+            time.sleep(1)
+        rc, rs = self.cal.get_vdu(self._acct, vdu_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.state, 'active')
+        logger.info("Openstack-CAL-Test: VDU with id : %s reached expected state  : %s" %(vdu_id, rs.state))
+        logger.info("Openstack-CAL-Test: VDUInfo: %s" %(rs))
+        
+        vlink_req = self._get_virtual_link_request_info()
+
+        ### Create another virtual_link
+        rc, rsp = self.cal.create_virtual_link(self._acct, vlink_req)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
+        vlink_id2= rsp
+
+        ### Now exercise the modify_vdu_api
+        vdu_modify = self._get_vdu_modify_request_info(vdu_id, vlink_id2)
+        rc = self.cal.modify_vdu(self._acct, vdu_modify)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Modified vdu with Id: %s" %vdu_id)
+
+        ### Lets delete the VDU
+        self.cal.delete_vdu(self._acct, vdu_id)
+
+        ### Lets delete the Virtual Link
+        self.cal.delete_virtual_link(self._acct, vlink_id)
+
+        ### Lets delete the Virtual Link-2
+        self.cal.delete_virtual_link(self._acct, vlink_id2)
+
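+        # Give OpenStack a few seconds to finish the deletions before verifying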
+        time.sleep(5)
+        ### Verify that VDU and virtual link are successfully deleted
+        rc, rsp = self.cal.get_vdu_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        for vdu in rsp.vdu_info_list:
+            self.assertNotEqual(vdu.vdu_id, vdu_id)
+
+        rc, rsp = self.cal.get_virtual_link_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        for virtual_link in rsp.virtual_link_info_list:
+            self.assertNotEqual(virtual_link.virtual_link_id, vlink_id)
+
+        logger.info("Openstack-CAL-Test: VDU/Virtual Link create-delete test successfully completed")
+
+
+class VmData(object):
+    """A convenience class that provides all the stats and EPA Attributes
+    from the VM provided
+    """
+    def __init__(self, host, mgmt_ip):
+        """
+        Args:
+            host (str): host name.
+            mgmt_ip (str): The IP of the newly created VM.
+        """
+        # Sleep for 20s to ensure the VM is UP and ready to run commands
+        time.sleep(20)
+        logger.info("Connecting to host: {} and IP: {}".format(host, mgmt_ip))
+        self.client = paramiko.SSHClient()
+        self.client.set_missing_host_key_policy(paramiko.WarningPolicy())
+        self.client.connect(host)
+        self.ip = mgmt_ip
+
+        # Get all data from the newly created VM.
+        self._data = self._get_data()
+        self._page_size = self._exec_and_clean("getconf PAGE_SIZE")
+        self._disk_space = self._exec_and_clean(
+                "df -kh --output=size /",
+                line_no=1)
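+        # Assumes any PCI passthrough device shows up as a "10-Gigabit" NIC in lspci output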
+        self._pci_data = self._exec('lspci -m | grep "10-Gigabit"')
+
+    def _get_data(self,):
+        """Runs the command and store the output in a python dict.
+
+        Returns:
+            dict: Containing all key => value pairs.
+        """
+        content = {}
+        cmds = ["lscpu", 'less /proc/meminfo']
+        for cmd in cmds:
+            ssh_out = self._exec(cmd)
+            content.update(self._convert_to_dict(ssh_out))
+        return content
+
+    def _exec_and_clean(self, cmd, line_no=0):
+        """A convenience method to run a command and extract the specified line
+        number.
+
+        Args:
+            cmd (str): Command to execute
+            line_no (int, optional): Default to 0, extracts the first line.
+
+        Returns:
+            str: line_no of the output of the command.
+        """
+        output = self._exec(cmd)[line_no]
+        output = ' '.join(output.split())
+        return output.strip()
+
+    def _exec(self, cmd):
+        """Thin wrapper that runs the command and returns the stdout data
+
+        Args:
+            cmd (str): Command to execute.
+
+        Returns:
+            list: Contains the command output.
+        """
+        _, ssh_out, _ = self.client.exec_command(
+                "/usr/rift/bin/ssh_root {} {}".format(self.ip,
+                                                      cmd))
+        return ssh_out.readlines()
+
+    def _convert_to_dict(self, content):
+        """convenience method that cleans and stores the line into dict.
+        data is split based on ":" or " ".
+
+        Args:
+            content (list): A list containing the stdout.
+
+        Returns:
+            dict: containing stat attribute => value.
+        """
+        flattened = {}
+        for line in content:
+            line = ' '.join(line.split())
+            if ":" in line:
+                key, value = line.split(":")
+            else:
+                key, value = line.split(" ")
+            key, value = key.strip(), value.strip()
+            flattened[key] = value
+        return flattened
+
+    @property
+    def disk(self):
+        disk = self._disk_space.replace("G", "")
+        return int(disk)
+
+    @property
+    def numa_node_count(self):
+        numa_cores = self._data['NUMA node(s)']
+        numa_cores = int(numa_cores)
+        return numa_cores
+
+    @property
+    def vcpus(self):
+        cores = int(self._data['CPU(s)'])
+        return cores
+
+    @property
+    def cpu_threads(self):
+        threads = int(self._data['Thread(s) per core'])
+        return threads
+
+    @property
+    def memory(self):
+        memory = self._data['MemTotal']
+        memory = int(memory.replace("kB", ""))/1000/1000
+        return int(memory)
+
+    @property
+    def memory_page_size(self):
+        return self._page_size
+
+    @property
+    def pci_passthrough_device_list(self):
+        return self._pci_data
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+    unittest.main()
diff --git a/modules/core/rwvx/rwcal/test/test_rwlxc_rwlaunchpad.py b/modules/core/rwvx/rwcal/test/test_rwlxc_rwlaunchpad.py
new file mode 100644 (file)
index 0000000..de0c588
--- /dev/null
@@ -0,0 +1,42 @@
+#!/usr/bin/env python3
+
+# 
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+
+import logging
+import os
+
+import rift.rwcal.cloudsim.lxc as lxc
+import rift.rwcal.cloudsim.lvm as lvm
+
+
+logger = logging.getLogger('rwcal-test')
+
+
+def main():
+    template = os.path.realpath("../rift/cal/lxc-fedora-rift.lxctemplate")
+    tarfile = "/net/strange/localdisk/jdowner/lxc.tar.gz"
+    volume = 'rift-test'
+
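+    # Create the backing LVM volume, build a master container from the template,
+    # snapshot it five times, then tear everything down in reverse order.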
+    lvm.create(volume, '/lvm/rift-test.img')
+
+    master = lxc.create_container('test-master', template, volume, tarfile)
+
+    snapshots = []
+    for index in range(5):
+        snapshots.append(master.snapshot('test-snap-{}'.format(index + 1)))
+
+    for snapshot in snapshots:
+        snapshot.destroy()
+
+    master.destroy()
+
+    lvm.destroy(volume)
+
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
+    main()