RIFT OSM R1 Initial Submission

Signed-off-by: Jeremy Mordkoff <jeremy.mordkoff@riftio.com>
diff --git a/rwcal/CMakeLists.txt b/rwcal/CMakeLists.txt
new file mode 100644
index 0000000..8eba04a
--- /dev/null
+++ b/rwcal/CMakeLists.txt
@@ -0,0 +1,60 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 2014/05/22
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME rwcal)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+set(subdirs src plugins test)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
+install(FILES include/riftware/rwcal-api.h
+  DESTINATION usr/include/riftware
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+    PROGRAMS
+    etc/userdata-template
+  DESTINATION etc
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+
+rift_python_install_tree(
+  FILES
+    rift/cal/client.py
+    rift/cal/server/__init__.py
+    rift/cal/server/app.py
+    rift/cal/server/operations.py
+    rift/cal/server/server.py
+    rift/cal/utils.py    
+    rift/cal/rwcal_status.py
+  PYTHON3_ONLY
+  COMPONENT rwcal-1.0)
+
+install(
+  PROGRAMS
+    rift/cal/cloudsim
+  DESTINATION usr/bin
+  COMPONENT rwcal-1.0
+  )
+
diff --git a/rwcal/Makefile b/rwcal/Makefile
new file mode 100644
index 0000000..14f3400
--- /dev/null
+++ b/rwcal/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 05/22/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/etc/userdata-template b/rwcal/etc/userdata-template
new file mode 100644
index 0000000..3195864
--- /dev/null
+++ b/rwcal/etc/userdata-template
@@ -0,0 +1,33 @@
+#cloud-config
+
+# run commands
+# default: none
+# runcmd contains a list of either lists or a string
+# each item will be executed in order at rc.local like level with
+# output to the console
+# - if the item is a list, the items will be properly executed as if
+#   passed to execve(3) (with the first arg as the command).
+# - if the item is a string, it will be simply written to the file and
+#   will be interpreted by 'sh'
+#
+# Note, that the list has to be proper yaml, so you have to escape
+# any characters yaml would eat (':' can be problematic)
+runcmd:
+ - [ ls, -l, / ]
+
+salt_minion:
+  conf:
+    master: {master_ip}
+    id: {lxcname}
+    acceptance_wait_time: 1
+    recon_default: 100
+    recon_max: 1000
+    recon_randomize: False
+    log_level: debug
+
+# For some unknown reason, the minion sometimes does not start
+# (and doesn't even leave a log file).  Force a start just in case
+runcmd:
+ - echo Sleeping for 5 seconds and attempting to start minion
+ - sleep 5
+ - /bin/systemctl start salt-minion.service
diff --git a/rwcal/include/riftware/rwcal-api.h b/rwcal/include/riftware/rwcal-api.h
new file mode 100644
index 0000000..6ef5f6a
--- /dev/null
+++ b/rwcal/include/riftware/rwcal-api.h
@@ -0,0 +1,561 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+/**
+ * @file rwcal-api.h
+ * @author Justin Bronder (justin.bronder@riftio.com)
+ * @date 09/29/2014
+ * @brief Top level API include for rwcal submodule
+ */
+
+#ifndef __RWCAL_API_H__
+#define __RWCAL_API_H__
+
+#include <stdbool.h>
+
+#include <libpeas/peas.h>
+
+#include <rwcal.h>
+#include <rwlib.h>
+#include <rw-manifest.pb-c.h>
+#include <rw_vx_plugin.h>
+
+#include "rwlog.h"
+
+__BEGIN_DECLS
+
+struct rwcal_module_s {
+  rw_vx_framework_t * framework;
+  rw_vx_modinst_common_t *mip;
+
+  PeasExtension * cloud;
+  RwCalCloud * cloud_cls;
+  RwCalCloudIface * cloud_iface;
+
+  rwlog_ctx_t *rwlog_instance;
+};
+typedef struct rwcal_module_s * rwcal_module_ptr_t;
+
+// Redefine yang autonames
+typedef RWPB_E(RwManifest_RwcalCloudType) rwcal_cloud_type;
+
+/*
+ * Allocate a rwcal module.  Once allocated, the clients within
+ * the module still need to be initialized.  For rwzk, see
+ * rwcal_rwzk_{kazoo,zake}_init().  For rwcloud, see
+ * rwcal_cloud_init().  It is a fatal error to attempt to use any
+ * client before it has been initialized.  However, it is
+ * perfectly fine to not initialize a client that will remain
+ * unused.  Note that every function contains the client that it
+ * will use as part of the name, just after the rwcal_ prefix.
+ *
+ * @return - rwcal module handle or NULL on failure.
+ */
+rwcal_module_ptr_t rwcal_module_alloc();
+
+/*
+ * Deallocate a rwcal module.
+ *
+ * @param - pointer to the rwcal module to be deallocated.
+ */
+void rwcal_module_free(rwcal_module_ptr_t * rwcal);
+
+
+/*
+ * Initialize the rwcal cloud controller.
+ *
+ * key/secret for various cloud types:
+ *  EC2: ACCESS_ID/SECRET_KEY
+ *
+ * @param rwcal       - module handle.
+ * @return        - RW_STATUS_SUCCESS,
+ *                  RW_STATUS_NOTFOUND if the type is unknown,
+ *                  RW_STATUS_FAILURE otherwise.
+ */
+rw_status_t rwcal_cloud_init(rwcal_module_ptr_t rwcal);
+
+/*
+ * Get a list of the names of the available images that can be
+ * used to start a new VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param image_names - on success, contains a NULL-terminated
+ *                      list of image names.
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_image_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **images);
+
+/*
+ * Delete Image.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param image_id    - id of image to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_image(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * image_id);
+
+/*
+ * Create a flavor.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param flavor      - rwpb_gi_Rwcal_FlavorInfoItem object describing the
+ *                      flavor to be created
+ * @param flavor_id   - on success, contains a NULL-terminated string containing the new flavor_id
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_flavor(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_FlavorInfoItem *flavor,
+    char *flavor_id);
+
+
+/*
+ * Delete flavor.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param flavor_id   - id of flavor to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_flavor(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * flavor_id);
+
+/*
+ * Get a specific flavor
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param flavor_id   - id of the flavor to return
+ * @param flavor      - rwpb_gi_Rwcal_FlavorInfoItem object containing the
+ *                      details of the requested flavor
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_flavor(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * flavor_id,
+    rwpb_gi_Rwcal_FlavorInfoItem **flavor);
+
+/*
+ * Get a list of the details for all flavors
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param flavors     - on success, contains a list of flavor info objects
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_flavor_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **flavors);
+
+/*
+ * Create a virtual machine.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm          - the information that defines what kind of VM will be
+ *                      created
+ * @param vm_id       - on success, contains a NULL-terminated string
+ *                      containing the new vm id
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VMInfoItem *vm,
+    char **vm_id);
+
+/*
+ * Delete VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm_id       - id of vm to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * vm_id);
+
+/*
+ * Reboot VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm_id       - id of vm to be rebooted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_reboot_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * vm_id);
+
+/*
+ * Start VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm_id       - id of a vm to start
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_start_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * vm_id);
+
+/*
+ * Stop VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm_id       - id of a vm to stop
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_stop_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * vm_id);
+
+/*
+ * Get a list of the names of the available vms
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vms         - on success, contains a NULL-terminated
+ *                      list of vms.
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_vm_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources** vms);
+
+/*
+ * Create a tenant.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param tenant_name - name to assign to the tenant.
+ * @param tenant_info - on success, contains a NULL-terminated list of tenant_info
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_tenant(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * tenant_name,
+    char *** tenant_info);
+
+/*
+ * Delete tenant.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param tenant_id   - id of tenant to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_tenant(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * tenant_id);
+
+/*
+ * Get a list of the available tenants
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param tenants     - on success, contains a NULL-terminated
+ *                      list of tenants.
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_tenant_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **tenants);
+
+/*
+ * Create a role.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param role_name   - name to assign to the role.
+ * @param role_info   - on success, contains a NULL-terminated list of role_info
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_role(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * role_name,
+    char *** role_info);
+
+/*
+ * Delete role.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param role_id     - id of role to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_role(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * role_id);
+
+/*
+ * Get a list of the available roles
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param roles       - on success, contains a NULL-terminated
+ *                      list of roles.
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_role_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **roles);
+
+/*
+ * Add a new host
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param host        - host info
+ * @param host_id     - on success, contains a NULL-terminated string
+ *                      containing the new host_id
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_add_host(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_HostInfoItem *host,
+    char **host_id);
+
+/*
+ * Remove a host
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param host_id     - the id of the host to remove
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_remove_host(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *host_id);
+
+/*
+ * Get a specific host
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param host_id     - the id of the host to return
+ * @param host        - the requested host info
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_host(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *host_id,
+    rwpb_gi_Rwcal_HostInfoItem **host);
+
+/*
+ * Get a list of hosts
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param hosts       - on success, contains a NULL-terminated list of hosts.
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_host_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **hosts);
+
+/*
+ * Create a new port
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param port        - port info
+ * @param port_id     - on success, contains a NULL-terminated string
+ *                      containing the new port id
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_port(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_PortInfoItem *port,
+    char **port_id);
+
+/*
+ * Delete a port
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param port_id     - the id of the port to remove
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_port(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *port_id);
+
+/*
+ * Get a specific port
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param port_id     - the id of the port to return
+ * @param port        - the requested port info
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_port(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *port_id,
+    rwpb_gi_Rwcal_PortInfoItem **port);
+
+/*
+ * Get a list of ports
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param ports       - on success, contains a NULL-terminated list of ports.
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_port_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **ports);
+
+/*
+ * Create a new network
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param network     - network info
+ * @param network_id  - on success, contains a NULL-terminated string
+ *                      containing the new network id
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_network(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_NetworkInfoItem *network,
+    char **network_id);
+
+/*
+ * Delete a network
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param network_id  - the id of the network to remove
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_network(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *network_id);
+
+/*
+ * Get a specific network
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param network_id  - the id of the network to return
+ * @param network     - the requested network info
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_network(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *network_id,
+    rwpb_gi_Rwcal_NetworkInfoItem **network);
+
+/*
+ * Get the management network
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param network     - the management network info
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_management_network(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_NetworkInfoItem **network);
+
+/*
+ * Get a list of networks
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param networks    - on success, contains a NULL-terminated list of networks.
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_network_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **networks);
+
+/*
+ * Get a RwLog Context so that log messages can go to rwlog
+ *
+ * @param rwcal       - module handle.
+ *
+ * @return            - rwlog_ctx_t
+ */
+rwlog_ctx_t *rwcal_get_rwlog_ctx(rwcal_module_ptr_t rwcal);
+
+__END_DECLS
+
+#endif
+
+
diff --git a/rwcal/plugins/CMakeLists.txt b/rwcal/plugins/CMakeLists.txt
new file mode 100644
index 0000000..28c67ce
--- /dev/null
+++ b/rwcal/plugins/CMakeLists.txt
@@ -0,0 +1,23 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 2014/05/22
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs vala yang rwcalproxytasklet)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwcal/plugins/rwcalproxytasklet/CMakeLists.txt b/rwcal/plugins/rwcalproxytasklet/CMakeLists.txt
new file mode 100644
index 0000000..b700ca6
--- /dev/null
+++ b/rwcal/plugins/rwcalproxytasklet/CMakeLists.txt
@@ -0,0 +1,31 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwcalproxytasklet rwcalproxytasklet.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/rwcalproxytasklet/__init__.py
+    rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py
+  COMPONENT rwcalproxytasklet-1.0
+  PYTHON3_ONLY)
diff --git a/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/__init__.py b/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/__init__.py
new file mode 100644
index 0000000..94af0b3
--- /dev/null
+++ b/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/__init__.py
@@ -0,0 +1 @@
+from .rwcalproxytasklet import RwCalProxyTasklet
diff --git a/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py b/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py
new file mode 100644
index 0000000..bb2c355
--- /dev/null
+++ b/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py
@@ -0,0 +1,633 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file rwcalproxytasklet.py
+@author Austin Cormier(austin.cormier@riftio.com)
+@date 2015-10-20
+"""
+
+import asyncio
+import collections
+import concurrent.futures
+import logging
+import os
+import sys
+
+import tornado
+import tornado.httpserver
+import tornado.web
+import tornado.platform.asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwCal', '1.0')
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwcalYang,
+    RwTypes,
+)
+
+import rw_peas
+import rift.tasklets
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class CalCallFailure(Exception):
+    pass
+
+
+class RPCParam(object):
+    def __init__(self, key, proto_type=None):
+        self.key = key
+        self.proto_type = proto_type
+
+
+class CalRequestHandler(tornado.web.RequestHandler):
+    def initialize(self, log, loop, cal, account, executor, cal_method,
+                   input_params=None, output_params=None):
+        self.log = log
+        self.loop = loop
+        self.cal = cal
+        self.account = account
+        self.executor = executor
+        self.cal_method = cal_method
+        self.input_params = input_params
+        self.output_params = output_params
+
+    def wrap_status_fn(self, fn, *args, **kwargs):
+        ret = fn(*args, **kwargs)
+        if not isinstance(ret, collections.Iterable):
+            ret = [ret]
+
+        rw_status = ret[0]
+        if type(rw_status) != RwTypes.RwStatus:
+            raise ValueError("First return value of %s function was not a RwStatus" %
+                             fn.__name__)
+
+        if rw_status != RwTypes.RwStatus.SUCCESS:
+            msg = "%s returned %s" % (fn.__name__, str(rw_status))
+            self.log.error(msg)
+            raise CalCallFailure(msg)
+
+        return ret[1:]
+
+    @tornado.gen.coroutine
+    def post(self):
+        def body_to_cal_args():
+            cal_args = []
+            if self.input_params is None:
+                return cal_args
+
+            input_dict = tornado.escape.json_decode(self.request.body)
+            if len(input_dict) != len(self.input_params):
+                raise ValueError("Got %s parameters, expected %s" %
+                                 (len(input_dict), len(self.input_params)))
+
+            for input_param in self.input_params:
+                key = input_param.key
+                value = input_dict[key]
+                proto_type = input_param.proto_type
+
+                if proto_type is not None:
+                    proto_cls = getattr(RwcalYang, proto_type)
+                    self.log.debug("Deserializing into %s type", proto_cls)
+                    value = proto_cls.from_dict(value)
+
+                cal_args.append(value)
+
+            return cal_args
+
+        def cal_return_vals(return_vals):
+            output_params = self.output_params
+            if output_params is None:
+                output_params = []
+
+            if len(return_vals) != len(output_params):
+                raise ValueError("Got %s return values.  Expected %s",
+                                 len(return_vals), len(output_params))
+
+            write_dict = {"return_vals": []}
+            for i, output_param in enumerate(output_params):
+                key = output_param.key
+                proto_type = output_param.proto_type
+                output_value = return_vals[i]
+
+                if proto_type is not None:
+                    output_value = output_value.as_dict()
+
+                return_val = {
+                        "key": key,
+                        "value": output_value,
+                        "proto_type": proto_type,
+                        }
+
+                write_dict["return_vals"].append(return_val)
+
+            return write_dict
+
+        @asyncio.coroutine
+        def handle_request():
+            self.log.debug("Got cloudsimproxy POST request: %s", self.request.body)
+            cal_args = body_to_cal_args()
+
+            # Execute the CAL request in a separate thread to prevent
+            # blocking the main loop.
+            return_vals = yield from self.loop.run_in_executor(
+                    self.executor,
+                    self.wrap_status_fn,
+                    getattr(self.cal, self.cal_method),
+                    self.account,
+                    *cal_args
+                    )
+
+            return cal_return_vals(return_vals)
+
+        f = asyncio.ensure_future(handle_request(), loop=self.loop)
+        return_dict = yield tornado.platform.asyncio.to_tornado_future(f)
+
+        self.log.debug("Responding to %s RPC with %s", self.cal_method, return_dict)
+
+        self.clear()
+        self.set_status(200)
+        self.write(return_dict)
+
+
+class CalProxyApp(tornado.web.Application):
+    def __init__(self, log, loop, cal_interface, cal_account):
+        self.log = log
+        self.loop = loop
+        self.cal = cal_interface
+        self.account = cal_account
+
+        attrs = dict(
+            log=self.log,
+            loop=self.loop,
+            cal=cal_interface,
+            account=cal_account,
+            # Create an executor with a single worker to prevent
+            # having multiple simultaneous calls into CAL (which is not threadsafe)
+            executor=concurrent.futures.ThreadPoolExecutor(1)
+            )
+
+        def mk_attrs(cal_method, input_params=None, output_params=None):
+            new_attrs = {
+                    "cal_method": cal_method,
+                    "input_params": input_params,
+                    "output_params": output_params
+                    }
+            new_attrs.update(attrs)
+
+            return new_attrs
+
+        super(CalProxyApp, self).__init__([
+            (r"/api/get_image_list", CalRequestHandler,
+                mk_attrs(
+                    cal_method="get_image_list",
+                    output_params=[
+                        RPCParam("images", "VimResources"),
+                        ]
+                    ),
+                ),
+
+            (r"/api/create_image", CalRequestHandler,
+                mk_attrs(
+                    cal_method="create_image",
+                    input_params=[
+                        RPCParam("image", "ImageInfoItem"),
+                        ],
+                    output_params=[
+                        RPCParam("image_id"),
+                        ]
+                    ),
+                ),
+
+            (r"/api/delete_image", CalRequestHandler,
+                mk_attrs(
+                    cal_method="delete_image",
+                    input_params=[
+                        RPCParam("image_id"),
+                        ],
+                    ),
+                ),
+
+            (r"/api/get_image", CalRequestHandler,
+                mk_attrs(
+                    cal_method="get_image",
+                    input_params=[
+                        RPCParam("image_id"),
+                        ],
+                    output_params=[
+                        RPCParam("image", "ImageInfoItem"),
+                        ],
+                    ),
+                ),
+
+            (r"/api/create_vm", CalRequestHandler,
+                mk_attrs(
+                    cal_method="create_vm",
+                    input_params=[
+                        RPCParam("vm", "VMInfoItem"),
+                        ],
+                    output_params=[
+                        RPCParam("vm_id"),
+                        ],
+                    ),
+                ),
+
+            (r"/api/start_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="start_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/stop_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="stop_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/reboot_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="reboot_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vm_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vm_list",
+                        output_params=[
+                            RPCParam("vms", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        output_params=[
+                            RPCParam("vm", "VMInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_flavor", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_flavor",
+                        input_params=[
+                            RPCParam("flavor", "FlavorInfoItem"),
+                            ],
+                        output_params=[
+                            RPCParam("flavor_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_flavor", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_flavor",
+                        input_params=[
+                            RPCParam("flavor_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_flavor_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_flavor_list",
+                        output_params=[
+                            RPCParam("flavors", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_flavor", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_flavor",
+                        input_params=[
+                            RPCParam("flavor_id"),
+                            ],
+                        output_params=[
+                            RPCParam("flavor", "FlavorInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_network",
+                        input_params=[
+                            RPCParam("network", "NetworkInfoItem"),
+                            ],
+                        output_params=[
+                            RPCParam("network_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_network",
+                        input_params=[
+                            RPCParam("network_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_network",
+                        input_params=[
+                            RPCParam("network_id"),
+                            ],
+                        output_params=[
+                            RPCParam("network", "NetworkInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_network_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_network_list",
+                        output_params=[
+                            RPCParam("networks", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_management_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_management_network",
+                        output_params=[
+                            RPCParam("network", "NetworkInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_port", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_port",
+                        input_params=[
+                            RPCParam("port", "PortInfoItem"),
+                            ],
+                        output_params=[
+                            RPCParam("port_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_port", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_port",
+                        input_params=[
+                            RPCParam("port_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_port", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_port",
+                        input_params=[
+                            RPCParam("port_id"),
+                            ],
+                        output_params=[
+                            RPCParam("port", "PortInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_port_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_port_list",
+                        output_params=[
+                            RPCParam("ports", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_virtual_link", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_virtual_link",
+                        input_params=[
+                            RPCParam("link_params", "VirtualLinkReqParams"),
+                            ],
+                        output_params=[
+                            RPCParam("link_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_virtual_link", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_virtual_link",
+                        input_params=[
+                            RPCParam("link_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_virtual_link", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_virtual_link",
+                        input_params=[
+                            RPCParam("link_id"),
+                            ],
+                        output_params=[
+                            RPCParam("response", "VirtualLinkInfoParams"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_virtual_link_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_virtual_link_list",
+                        output_params=[
+                            RPCParam("resources", "VNFResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_vdu",
+                        input_params=[
+                            RPCParam("vdu_params", "VDUInitParams"),
+                            ],
+                        output_params=[
+                            RPCParam("vdu_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/modify_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="modify_vdu",
+                        input_params=[
+                            RPCParam("vdu_params", "VDUModifyParams"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_vdu",
+                        input_params=[
+                            RPCParam("vdu_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vdu",
+                        input_params=[
+                            RPCParam("vdu_id"),
+                            ],
+                        output_params=[
+                            RPCParam("response", "VDUInfoParams"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vdu_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vdu_list",
+                        output_params=[
+                            RPCParam("resources", "VNFResources"),
+                            ],
+                        ),
+                    )
+            ])
+
+
+class RwCalProxyTasklet(rift.tasklets.Tasklet):
+    HTTP_PORT = 9002
+    cal_interface = None
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        self.app = None
+        self.server = None
+
+    def get_cal_interface(self):
+        if RwCalProxyTasklet.cal_interface is None:
+            plugin = rw_peas.PeasPlugin('rwcal_cloudsim', 'RwCal-1.0')
+            engine, info, extension = plugin()
+
+            RwCalProxyTasklet.cal_interface = plugin.get_interface("Cloud")
+            RwCalProxyTasklet.cal_interface.init(self.log_hdl)
+
+        return RwCalProxyTasklet.cal_interface
+
+    def start(self):
+        """Tasklet entry point"""
+        self.log.setLevel(logging.DEBUG)
+
+        super().start()
+
+        cal = self.get_cal_interface()
+        account = RwcalYang.CloudAccount(account_type="cloudsim")
+
+        self.app = CalProxyApp(self.log, self.loop, cal, account)
+        self._dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                RwcalYang.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+        self.server = tornado.httpserver.HTTPServer(
+                self.app,
+                io_loop=io_loop,
+                )
+
+        self.log.info("Starting Cal Proxy Http Server on port %s",
+                      RwCalProxyTasklet.HTTP_PORT)
+        self.server.listen(RwCalProxyTasklet.HTTP_PORT)
+
+    def stop(self):
+      try:
+         self.server.stop()
+         self._dts.deinit()
+      except Exception:
+         print("Caught Exception in LP stop:", sys.exc_info()[0])
+         raise
+
+    @asyncio.coroutine
+    def init(self):
+        pass
+
+    @asyncio.coroutine
+    def run(self):
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Take action according to current dts state to transition
+        application into the corresponding application state
+
+        Arguments
+            state - current dts state
+        """
+
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self._dts.handle.set_state(next_state)
diff --git a/rwcal/plugins/rwcalproxytasklet/rwcalproxytasklet.py b/rwcal/plugins/rwcalproxytasklet/rwcalproxytasklet.py
new file mode 100644
index 0000000..c0a9c3f
--- /dev/null
+++ b/rwcal/plugins/rwcalproxytasklet/rwcalproxytasklet.py
@@ -0,0 +1,29 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwcalproxytasklet
+
+
+class Tasklet(rift.tasklets.rwcalproxytasklet.RwCalProxyTasklet):
+    pass
+
+# vim: sw=4
diff --git a/rwcal/plugins/vala/CMakeLists.txt b/rwcal/plugins/vala/CMakeLists.txt
new file mode 100644
index 0000000..3482277
--- /dev/null
+++ b/rwcal/plugins/vala/CMakeLists.txt
@@ -0,0 +1,75 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf / Anil Gunturu
+# Creation Date: 05/22/2014
+# 
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwcal)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwCal-${VALA_VERSION})
+
+rift_add_vala(
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES
+    rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0
+    rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
+    rw_log-1.0
+  VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwvcs/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwlog/src
+  GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwvcs/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwlog/src
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  DEPENDS rwcal_yang rwlog_gi rwschema_yang rwmanifest_yang
+  )
+
+rift_install_vala_artifacts(
+  HEADER_FILES ${VALA_NAME}.h
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+  )
+
+
+set(subdirs
+  rwcal_cloudsim
+  rwcal_cloudsimproxy
+  rwcal_mock
+  rwcal_openstack
+  rwcal_openmano
+  rwcal_aws
+  rwcal_openmano_vimconnector
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwcal/plugins/vala/Makefile b/rwcal/plugins/vala/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwcal/plugins/vala/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal.vala b/rwcal/plugins/vala/rwcal.vala
new file mode 100644
index 0000000..a14388e
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal.vala
@@ -0,0 +1,248 @@
+namespace RwCal {
+
+  public class RwcalStatus : GLib.Object {
+    public RwTypes.RwStatus status;
+    public string error_msg;
+    public string traceback;
+  }
+
+  public interface Cloud: GLib.Object {
+    /*
+     * Init routine
+     */
+    public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx);
+
+    /*
+     * Cloud Account Credentials Validation related API
+     */
+    public abstract RwTypes.RwStatus validate_cloud_creds(
+      Rwcal.CloudAccount account,
+      out Rwcal.CloudConnectionStatus status);
+
+    /*
+     * Image related APIs
+     */
+    public abstract RwTypes.RwStatus get_image_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources images);
+
+    public abstract RwTypes.RwStatus create_image(
+      Rwcal.CloudAccount account,
+      Rwcal.ImageInfoItem image,
+      out string image_id);
+
+    public abstract RwTypes.RwStatus delete_image(
+      Rwcal.CloudAccount account,
+      string image_id);
+
+    public abstract RwTypes.RwStatus get_image(
+        Rwcal.CloudAccount account,
+        string image_id,
+        out Rwcal.ImageInfoItem image);
+
+    /*
+     * VM Related APIs
+     */
+    public abstract RwTypes.RwStatus create_vm(
+      Rwcal.CloudAccount account,
+      Rwcal.VMInfoItem vm,
+      out string vm_id);
+
+    public abstract RwTypes.RwStatus start_vm(
+      Rwcal.CloudAccount account,
+      string vm_id);
+
+    public abstract RwTypes.RwStatus stop_vm(
+      Rwcal.CloudAccount account,
+      string vm_id);
+
+    public abstract RwTypes.RwStatus delete_vm(
+      Rwcal.CloudAccount account,
+      string vm_id);
+
+    public abstract RwTypes.RwStatus reboot_vm(
+      Rwcal.CloudAccount account,
+      string vm_id);
+
+    public abstract RwTypes.RwStatus get_vm_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources vms);
+
+    public abstract RwTypes.RwStatus get_vm(
+      Rwcal.CloudAccount account,
+      string vm_id,
+      out Rwcal.VMInfoItem vm);
+
+    /*
+     * Flavor related APIs
+     */
+    public abstract RwTypes.RwStatus create_flavor(
+      Rwcal.CloudAccount account,
+      Rwcal.FlavorInfoItem flavor_info_item,
+      out string flavor_id);
+
+    public abstract RwTypes.RwStatus delete_flavor(
+      Rwcal.CloudAccount account,
+      string flavor_id);
+
+    public abstract RwTypes.RwStatus get_flavor_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources flavors);
+
+    public abstract RwTypes.RwStatus get_flavor(
+      Rwcal.CloudAccount account,
+      string flavor_id,
+      out Rwcal.FlavorInfoItem flavor);
+
+
+    /*
+     * Tenant related APIs
+     */
+    public abstract RwTypes.RwStatus create_tenant(
+      Rwcal.CloudAccount account,
+      string tenant_name,
+      [CCode (array_length = false, array_null_terminated = true)]
+      out string [] tenant_info);
+
+    public abstract RwTypes.RwStatus delete_tenant(
+      Rwcal.CloudAccount account,
+      string tenant_id);
+
+    public abstract RwTypes.RwStatus get_tenant_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources tenants);
+
+    /*
+     * Role related APIs
+     */
+    public abstract RwTypes.RwStatus create_role(
+      Rwcal.CloudAccount account,
+      string role_name,
+      [CCode (array_length = false, array_null_terminated = true)]
+      out string [] role_info);
+
+    public abstract RwTypes.RwStatus delete_role(
+      Rwcal.CloudAccount account,
+      string role_id);
+
+    public abstract RwTypes.RwStatus get_role_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources roles);
+
+    /*
+     * Port related APIs
+     */
+    public abstract RwTypes.RwStatus create_port(
+      Rwcal.CloudAccount account,
+      Rwcal.PortInfoItem port,
+      out string port_id);
+
+    public abstract RwTypes.RwStatus delete_port(
+      Rwcal.CloudAccount account,
+      string port_id);
+
+    public abstract RwTypes.RwStatus get_port(
+      Rwcal.CloudAccount account,
+      string port_id,
+      out Rwcal.PortInfoItem port);
+
+    public abstract RwTypes.RwStatus get_port_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources ports);
+
+    /*
+     * Host related APIs
+     */
+    public abstract RwTypes.RwStatus add_host(
+      Rwcal.CloudAccount account,
+      Rwcal.HostInfoItem host,
+      out string host_id);
+
+    public abstract RwTypes.RwStatus remove_host(
+      Rwcal.CloudAccount account,
+      string host_id);
+
+    public abstract RwTypes.RwStatus get_host(
+      Rwcal.CloudAccount account,
+      string host_id,
+      out Rwcal.HostInfoItem host);
+
+    public abstract RwTypes.RwStatus get_host_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources hosts);
+
+    /*
+     * Network related APIs
+     */
+    public abstract RwTypes.RwStatus create_network(
+      Rwcal.CloudAccount account,
+      Rwcal.NetworkInfoItem network,
+      out string network_id);
+
+    public abstract RwTypes.RwStatus delete_network(
+      Rwcal.CloudAccount account,
+      string network_id);
+
+    public abstract RwTypes.RwStatus get_network(
+      Rwcal.CloudAccount account,
+      string network_id,
+      out Rwcal.NetworkInfoItem network);
+
+    public abstract RwTypes.RwStatus get_network_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources networks);
+
+    public abstract RwTypes.RwStatus get_management_network(
+      Rwcal.CloudAccount account,
+      out Rwcal.NetworkInfoItem network);
+
+    /*
+     * Higher Order CAL APIs
+     */
+    public abstract void create_virtual_link(
+      Rwcal.CloudAccount account,
+      Rwcal.VirtualLinkReqParams link_params,
+      out RwcalStatus status,
+      out string link_id);
+    
+    public abstract RwTypes.RwStatus delete_virtual_link(
+      Rwcal.CloudAccount account,
+      string link_id);
+
+    public abstract RwTypes.RwStatus get_virtual_link(
+      Rwcal.CloudAccount account,
+      string link_id,
+      out Rwcal.VirtualLinkInfoParams response);
+
+    public abstract RwTypes.RwStatus get_virtual_link_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VNFResources resources);
+
+
+    public abstract void create_vdu(
+      Rwcal.CloudAccount account,
+      Rwcal.VDUInitParams vdu_params,
+      out RwcalStatus status,
+      out string vdu_id);
+
+    public abstract RwTypes.RwStatus modify_vdu(
+      Rwcal.CloudAccount account,
+      Rwcal.VDUModifyParams vdu_params);
+    
+    public abstract RwTypes.RwStatus delete_vdu(
+      Rwcal.CloudAccount account,
+      string vdu_id);
+
+    public abstract RwTypes.RwStatus get_vdu(
+      Rwcal.CloudAccount account,
+      string vdu_id,
+      out Rwcal.VDUInfoParams response);
+    
+    public abstract RwTypes.RwStatus get_vdu_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VNFResources resources);
+    
+  }
+}
+
+
diff --git a/rwcal/plugins/vala/rwcal_aws/CMakeLists.txt b/rwcal/plugins/vala/rwcal_aws/CMakeLists.txt
new file mode 100644
index 0000000..76430b1
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_aws/CMakeLists.txt
@@ -0,0 +1,37 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+set(PKG_NAME rwcal-aws)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+rift_install_python_plugin(rwcal_aws rwcal_aws.py)
+
+rift_python_install_tree(
+  FILES
+    rift/rwcal/aws/__init__.py
+    rift/rwcal/aws/aws_table.py
+    rift/rwcal/aws/aws_drv.py
+    rift/rwcal/aws/exceptions.py
+    rift/rwcal/aws/prepare_vm.py
+    rift/rwcal/aws/delete_vm.py
+  PYTHON3_ONLY
+  COMPONENT ${PKG_LONG_NAME})
+
diff --git a/rwcal/plugins/vala/rwcal_aws/Makefile b/rwcal/plugins/vala/rwcal_aws/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_aws/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/__init__.py b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/__init__.py
new file mode 100644
index 0000000..4ce1fa2
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/__init__.py
@@ -0,0 +1 @@
+from .aws_drv import AWSDriver
diff --git a/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_drv.py b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_drv.py
new file mode 100644
index 0000000..2c47279
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_drv.py
@@ -0,0 +1,974 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import boto3
+import botocore
+from . import aws_table
+from . import exceptions
+
+import logging
+logger = logging.getLogger('rwcal.aws.drv')
+logger.setLevel(logging.DEBUG)
+
+class AWSDriver(object):
+    """
+    Driver for AWS
+    """
+    def __init__(self, key, secret, region,ssh_key=None,vpcid = None,availability_zone = None,default_subnet_id = None):
+        """
+          Constructor for AWSDriver
+          Arguments:
+             key    : AWS user access key
+             secret : AWS user access secret
+             region : AWS region
+             ssh_key: Name of key pair to connect to EC2 instance
+             vpcid  : VPC ID for the resources
+             availability_zone: Availability zone to allocate EC2 instance.
+             default_subnet_id: Default subnet id to be used for the EC2 instance interfaces at instance creation time
+          Returns: AWS Driver Object 
+        """
+        self._access_key    = key
+        self._access_secret = secret
+        self._region        = region
+        self._availability_zone =  availability_zone
+        self._ssh_key       = ssh_key
+        
+        self._sess  = boto3.session.Session(aws_access_key_id = self._access_key,
+                                            aws_secret_access_key = self._access_secret,
+                                            region_name = self._region)
+        self._ec2_resource_handle = self._sess.resource(service_name = 'ec2')
+        self._s3_handle  = self._sess.resource(service_name = 's3')
+        self._iam_handle = self._sess.resource(service_name = 'iam')
+
+        self._acct_arn = self._iam_handle.CurrentUser().arn
+        self._account_id = self._acct_arn.split(':')[4]
+        # If VPC id is not passed; use default VPC for the account 
+        if vpcid is None:
+            self._vpcid = self._default_vpc_id
+        else:
+            self._vpcid  = vpcid
+
+        self._default_subnet_id = default_subnet_id 
+        # If default_subnet_id is not passed; get default subnet for AZ.
+        # We use this to create first network interface during instance creation time. This subnet typically should have an associated public address 
+        # to get public address.  
+        if default_subnet_id is None:
+            self._default_subnet_id = self._get_default_subnet_id_for_az 
+           
+       
+    @property
+    def default_subnet_id(self):
+        """
+           Returns default subnet id for account
+        """
+        return self._default_subnet_id
+
+    @property
+    def _ec2_client_handle(self):
+        """
+        Low level EC2 client connection handle
+           Arguments: None
+           Returns: EC2 Client Connection Handle
+        """
+        return self._ec2_resource_handle.meta.client
+
+    @property
+    def _default_vpc_id(self):
+        """
+        Method to get Default VPC ID
+          Arguments: None
+          Returns: Default EC2.Vpc Resource ID for AWS account
+        """
+        return self._default_vpc.vpc_id
+
+    @property
+    def _default_vpc(self):
+        """
+        Method to get Default VPC Resource Object
+           Arguments: None
+           Returns: Default EC2.Vpc Resource for AWS account
+        """
+        try:
+           response = list(self._ec2_resource_handle.vpcs.all())
+        except Exception as e:
+            logger.error("AWSDriver: Get of Default VPC failed with exception: %s" %(repr(e)))
+            raise
+        default_vpc = [vpc for vpc in response if vpc.is_default]
+        assert(len(default_vpc) == 1)
+        return default_vpc[0]
+
+    def _get_vpc_info(self,VpcId):
+        """
+        Get Vpc resource for specified VpcId
+          Arguments:
+            - VpcId (String) : VPC ID  
+          Returns: EC2.Vpc Resource
+        """ 
+        VpcIds = list()
+        VpcIds.append(VpcId)
+        response = list(self._ec2_resource_handle.vpcs.filter(
+                                               VpcIds = VpcIds))
+        if response:
+            assert(len(response) == 1)
+            return response[0]
+        return None
+
+
+    def upload_image(self, **kwargs):
+        """
+        Upload image to s3
+          Arguments: **kwargs -- dictionary
+               {
+                 'image_path'          : File location for the image,
+                 'image_prefix'        : Name-Prefix of the image on S3 
+                 'public_key'          : The path to the user's PEM encoded RSA public key certificate file,
+                 'private_key'         : The path to the user's PEM encoded RSA private key file,
+                 'arch'                : One of ["i386", "x86_64"],
+                 's3_bucket'           : Name of S3 bucket where this image should be uploaded
+                                         (e.g. 'Rift.Cal' or 'Rift.VNF' or 'Rift.3rdPartyVM' etc)
+                 'kernelId'            : Id of the default kernel to launch the AMI with (OPTIONAL)
+                 'ramdiskId'           : Id of the default ramdisk to launch the AMI with (OPTIONAL)
+                 'block_device_mapping : block_device_mapping string  (OPTIONAL)
+                                         Default block-device-mapping scheme to launch the AMI with. This scheme
+                                         defines how block devices may be exposed to an EC2 instance of this AMI
+                                         if the instance-type of the instance is entitled to the specified device.
+                                         The scheme is a comma-separated list of key=value pairs, where each key
+                                         is a "virtual-name" and each value, the corresponding native device name
+                                         desired. Possible virtual-names are:
+                                         - "ami": denotes the root file system device, as seen by the instance.
+                                         - "root": denotes the root file system device, as seen by the kernel.
+                                         - "swap": denotes the swap device, if present.
+                                         - "ephemeralN": denotes Nth ephemeral store; N is a non-negative integer.
+                                          Note that the contents of the AMI form the root file system. Samples of
+                                          block-device-mappings are:
+                                          '"ami=sda1","root=/dev/sda1","ephemeral0=sda2","swap=sda3"'
+                                          '"ami=0","root=/dev/dsk/c0d0s0","ephemeral0=1"'
+               }
+          Returns: None
+        """
+        import subprocess
+        import tempfile
+        import os
+        import shutil
+        
+        CREATE_BUNDLE_CMD  = 'ec2-bundle-image --cert {public_key} --privatekey {private_key} --user {account_id} --image {image_path} --prefix {image_prefix} --arch {arch}'
+        UPLOAD_BUNDLE_CMD  = 'ec2-upload-bundle --bucket {bucket} --access-key {key} --secret-key {secret} --manifest {manifest} --region {region} --retry'
+        
+        cmdline = CREATE_BUNDLE_CMD.format(public_key    = kwargs['public_key'],
+                                           private_key   = kwargs['private_key'],
+                                           account_id    = self._account_id,
+                                           image_path    = kwargs['image_path'],
+                                           image_prefix  = kwargs['image_prefix'],
+                                           arch          = kwargs['arch'])
+        
+        if 'kernelId' in kwargs:
+            cmdline += (' --kernel ' + kwargs['kernelId'])
+
+        if 'ramdiskId' in kwargs:
+            cmdline += (' --ramdisk ' + kwargs['ramdiskId'])
+            
+        if 'block_device_mapping' in kwargs:
+            cmdline += ' --block-device-mapping ' + kwargs['block_device_mapping']
+
+        ### Create Temporary Directory
+        try:
+            tmp_dir = tempfile.mkdtemp()
+        except Exception as e:
+            logger.error("Failed to create temporary directory. Exception Details: %s" %(repr(e)))
+            raise
+
+        cmdline += (" --destination " + tmp_dir)
+        logger.info('AWSDriver: Executing ec2-bundle-image command. Target directory name: %s. This command may take a while...\n' %(tmp_dir))
+        result = subprocess.call(cmdline.split())
+        if result == 0:
+            logger.info('AWSDriver: ec2-bundle-image command succeeded')
+        else:
+            logger.error('AWSDriver: ec2-bundle-image command failed. Return code %d. CMD: %s'%(result, cmdline))
+            raise OSError('AWSDriver: ec2-bundle-image command failed. Return code %d' %(result))
+        
+        logger.info('AWSDriver: Initiating image upload. This may take a while...')
+
+        cmdline = UPLOAD_BUNDLE_CMD.format(bucket   = kwargs['s3_bucket'],
+                                           key      = self._access_key,
+                                           secret   = self._access_secret,
+                                           manifest = tmp_dir+'/'+kwargs['image_prefix']+'.manifest.xml',
+                                           region   = self._region)
+        result = subprocess.call(cmdline.split())
+        if result == 0:
+            logger.info('AWSDriver: ec2-upload-bundle command succeeded')
+        else:
+            logger.error('AWSDriver: ec2-upload-bundle command failed. Return code %d. CMD: %s'%(result, cmdline))
+            raise OSError('AWSDriver: ec2-upload-bundle command failed. Return code %d' %(result))
+        ### Delete the temporary directory
+        logger.info('AWSDriver: Deleting temporary directory and other software artifacts')
+        shutil.rmtree(tmp_dir, ignore_errors = True)
+        
+                     
+    def register_image(self, **kwargs):
+        """
+        Registers an image uploaded to S3 with EC2
+           Arguments: **kwargs -- dictionary
+             {
+                Name (string)         : Name of the image
+                ImageLocation(string) : Location of image manifest file in S3 (e.g. 'rift.cal.images/test-img.manifest.xml')
+                Description(string)   : Description for the image (OPTIONAL)
+                Architecture (string) : Possible values 'i386' or 'x86_64' (OPTIONAL)
+                KernelId(string)      : Kernel-ID Refer: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs (OPTIONAL)
+                RamdiskId(string)     : Ramdisk-ID Refer: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs (OPTIONAL)
+                RootDeviceName(string): The name of the root device (for example, /dev/sda1 , or /dev/xvda ) (OPTIONAL)
+                BlockDeviceMappings(list) : List of dictionary of block device mapping (OPTIONAL)
+                                            [
+                                               {
+                                                 'VirtualName': 'string',
+                                                 'DeviceName': 'string',
+                                                 'Ebs': {
+                                                    'SnapshotId': 'string',
+                                                    'VolumeSize': 123,
+                                                    'DeleteOnTermination': True|False,
+                                                    'VolumeType': 'standard'|'io1'|'gp2',
+                                                    'Iops': 123,
+                                                    'Encrypted': True|False
+                                                 },
+                                                 'NoDevice': 'string'
+                                              },
+                                            ]
+                VirtualizationType(string): The type of virtualization (OPTIONAL)
+                                           Default: paravirtual
+                SriovNetSupport(string): (OPTIONAL)
+                       Set to ``simple`` to enable enhanced networking for the AMI and any instances that are launched from the AMI.
+                       This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.
+        
+          Returns:
+             image_id: UUID of the image
+        """
+
+        kwargs['DryRun'] = False
+        try:
+            response = self._ec2_client_handle.register_image(**kwargs)
+        except Exception as e:
+            logger.error("AWSDriver: List image operation failed with exception: %s" %(repr(e)))
+            raise
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+        return response['ImageId']
+        
+
+    def deregister_image(self, ImageId):
+        """
+        DeRegisters image from EC2.
+          Arguments:
+            - ImageId (string): ImageId generated by AWS in register_image call
+          Returns: None
+        """
+        try:
+            response = self._ec2_client_handle.deregister_image(
+                                                         ImageId = ImageId)
+        except Exception as e:
+            logger.error("AWSDriver: deregister_image operation failed with exception: %s" %(repr(e)))
+            raise
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+        
+    def get_image(self, ImageId):
+        """
+        Returns a dictionary object describing the Image identified by ImageId
+        """
+        try:
+            response = list(self._ec2_resource_handle.images.filter(ImageIds = [ImageId]))
+        except Exception as e:
+            logger.error("AWSDriver: List image operation failed with exception: %s" %(repr(e)))
+            raise
+        return response[0]
+        
+    def list_images(self):
+        """
+        Returns list of dictionaries. Each dictionary contains attributes associated with image
+           Arguments: None
+           Returns: List of dictionaries.
+        """
+        try:
+            response = list(self._ec2_resource_handle.images.filter(Owners = [self._account_id]))
+        except Exception as e:
+            logger.error("AWSDriver: List image operation failed with exception: %s" %(repr(e)))
+            raise
+        return response
+
+    def create_image_from_instance(self,InstanceId,ImageName,VolumeSize = 16):
+        """
+        Creates AWS AMI from the instance root device Volume and registers the same
+        Caller is expected to stop the instance and restart the instance if required 
+        Arguments:
+           - InstanceId (String) : AWS EC2 Instance Id
+           - ImageName (String)  : Name for AMI
+         Returns
+           - AWS AMI Image Id
+        """
+
+        try:
+            inst = self.get_instance(InstanceId)
+            # Find Volume Id of Root Device
+            if inst.root_device_type == 'ebs':
+                for dev in inst.block_device_mappings:
+                    if inst.root_device_name == dev['DeviceName']:
+                        volume_id = dev['Ebs']['VolumeId']
+                        break
+
+                rsp=self._ec2_resource_handle.create_snapshot(VolumeId=volume_id)
+                snapshot_id = rsp.id
+
+                #Wait for the snapshot to be completed
+                attempts = 0
+                while attempts < 2:
+                    try:
+                        attempts = attempts + 1
+                        waiter = self._ec2_client_handle.get_waiter('snapshot_completed')
+                        waiter.wait(SnapshotIds=[snapshot_id])
+                    except botocore.exceptions.WaiterError as e:
+                        logger.error("AWSDriver: Create Snapshot for image still not completed. Will wait for another iteration") 
+                        continue
+                    except Exception as e:
+                        logger.error("AWSDriver: Createing Snapshot for instance failed during image creation: %s", (repr(e)))
+                        raise
+                    break
+                  
+                logger.debug("AWSDriver: Snapshot %s completed successfully from instance %s",snapshot_id,InstanceId)
+                image_id = self.register_image(Name=ImageName,VirtualizationType='hvm',
+                                               RootDeviceName='/dev/sda1',SriovNetSupport='simple',
+                                               BlockDeviceMappings=[{'DeviceName':'/dev/sda1',
+                                               'Ebs':{'SnapshotId':snapshot_id,'VolumeSize': VolumeSize,
+                                               'VolumeType': 'standard', 'DeleteOnTermination': True}}],
+                                               Architecture='x86_64')
+                return image_id
+            else:
+                logger.error("AWSDriver: Create Image failed as Instance Root device Type should be ebs to create image") 
+                raise exceptions.RWErrorFailure("AWSDriver: Create Image failed as Instance Root device Type should be ebs to create image")
+        except Exception as e:
+            logger.error("AWSDriver: Createing image from instance failed with exception: %s", (repr(e)))
+            raise
+        
+    def list_instances(self):
+        """
+        Returns list of resource object representing EC2 instance.
+           Arguments: None
+           Returns:  List of EC2.Instance object
+        """
+        instance_list = []
+        try:
+            # Skip Instances in terminated state
+            response = self._ec2_resource_handle.instances.filter(
+                                                           Filters = [
+                                                               { 'Name': 'instance-state-name',
+                                                                 'Values': ['pending',
+                                                                            'running',
+                                                                            'shutting-down',
+                                                                            'stopping',
+                                                                            'stopped']
+                                                            }
+                                                           ])
+        except Exception as e:
+            logger.error("AWSDriver: List instances operation failed with exception: %s" %(repr(e)))
+            raise
+        for instance in response:
+             instance_list.append(instance)
+        return instance_list
+
+    def get_instance(self, InstanceId):
+        """
+        Returns a EC2 resource Object describing the Instance identified by InstanceId
+           Arguments:
+             - InstanceId (String) : MANDATORY, EC2 Instance Id
+           Returns: EC2.Instance object
+        """
+
+        try:
+            instance = list(self._ec2_resource_handle.instances.filter(
+                                                           InstanceIds = [InstanceId]))
+        except Exception as e:
+            logger.error("AWSDriver: Get instances operation failed with exception: %s" %(repr(e)))
+            raise
+        if len(instance) == 0:
+            logger.error("AWSDriver: instance with id %s not avaialble" %InstanceId)
+            raise exceptions.RWErrorNotFound("AWSDriver: instance with id %s not avaialble" %InstanceId)
+        elif len(instance) > 1:
+            logger.error("AWSDriver: Duplicate instances with id %s is avaialble" %InstanceId)
+            raise exceptions.RWErrorDuplicate("AWSDriver: Duplicate instances with id %s is avaialble" %InstanceId)
+        return instance[0] 
+
+    def create_instance(self,**kwargs):
+        """
+         Create an EC2instance.
+            Arguments: **kwargs -- dictionary
+               {
+                  ImageId (string): MANDATORY, Id of AMI to create instance 
+                  SubnetId (string): Id of Subnet to start EC2 instance. EC2 instance will be started in VPC subnet resides. 
+                                    Default subnet from account used if not present
+                  InstanceType(string): AWS Instance Type name. Default: t2.micro
+                  SecurityGroupIds: AWS Security Group Id to associate with the instance. Default from VPC used if not present
+                  KeyName (string): Key pair name. Default key pair from account used if not present 
+                  MinCount (Integer): Minimum number of instance to start. Default: 1
+                  MaxCount (Integer): Maximum number of instance to start. Default: 1
+                  Placement (Dict) : Dictionary having Placement group details
+                                     {AvailabilityZone (String): AZ to create the instance}
+                  UserData (string) : cloud-init config file 
+               }
+            Returns: List of EC2.Instance object
+        """ 
+
+        if 'ImageId' not in kwargs:
+            logger.error("AWSDriver: Mandatory parameter ImageId not available during create_instance")
+            raise AttributeError("Mandatory parameter ImageId not available during create_instance")
+
+        # Validate image exists and is available
+        try:
+            image_res = self._ec2_resource_handle.Image(kwargs['ImageId'])
+            image_res.load() 
+        except Exception as e:
+            logger.error("AWSDriver: Image with id %s not available and failed with exception: %s",kwargs['ImageId'],(repr(e)))
+            raise AttributeError("AWSDriver: Image with id %s not available and failed with exception: %s",kwargs['ImageId'],(repr(e)))
+        if image_res.state != 'available':
+            logger.error("AWSDriver: Image state is not available for image with id %s; Current state is %s",
+                         image_res.id,image_res.state)
+            raise AttributeError("ImageId is not valid")
+
+        # If MinCount or MaxCount is not passed set them to default of 1
+        if 'MinCount' not in kwargs:
+            kwargs['MinCount'] = 1  
+        if 'MaxCount' not in kwargs:
+            kwargs['MaxCount'] = kwargs['MinCount'] 
+
+        if 'KeyName' not in kwargs:
+            if not self._ssh_key:
+                logger.error("AWSDriver: Key not available during create_instance to allow SSH")
+            else:
+                kwargs['KeyName'] = self._ssh_key
+
+        if 'Placement' not in kwargs and self._availability_zone is not None:
+            placement = {'AvailabilityZone':self._availability_zone}
+            kwargs['Placement'] = placement
+
+        if 'SubnetId' not in kwargs and 'NetworkInterfaces' not in kwargs:
+            if self._default_subnet_id:
+                kwargs['SubnetId'] = self._default_subnet_id
+            else: 
+                logger.error("AWSDriver: Valid subnetid not present during create instance")
+                raise AttributeError("Valid subnet not present during create instance")
+
+        if self._availability_zone and 'SubnetId' in kwargs:
+            subnet = self.get_subnet(SubnetId= kwargs['SubnetId']) 
+            if not subnet:
+                logger.error("AWSDriver: Valid subnet not found for subnetid %s",kwargs['SubnetId'])
+                raise AttributeError("Valid subnet not found for subnetid %s",kwargs['SubnetId'])
+            if subnet.availability_zone != self._availability_zone:
+                logger.error("AWSDriver: AZ of Subnet %s %s doesnt match account AZ %s",kwargs['SubnetId'],
+                                       subnet.availability_zone,self._availability_zone)
+                raise AttributeError("AWSDriver: AZ of Subnet %s %s doesnt match account AZ %s",kwargs['SubnetId'],
+                                       subnet.availability_zone,self._availability_zone)
+
+        # If instance type is not passed; use t2.micro as default
+        if 'InstanceType' not in kwargs or kwargs['InstanceType'] is None:
+               kwargs['InstanceType'] = 't2.micro'
+        inst_type =  kwargs['InstanceType']
+        if inst_type not in aws_table.INSTANCE_TYPES.keys():
+            logger.error("AWSDriver: Invalid instance type %s used",inst_type)
+            raise AttributeError('InstanceType %s is not valid' %inst_type)
+
+        #validate instance_type for AMI 
+        if image_res.sriov_net_support == 'simple':
+            if image_res.virtualization_type != 'hvm':
+                logger.error("AWSDriver: Image with id %s has SRIOV net support but virtualization type is not hvm",kwargs['ImageId'])
+                raise AttributeError('Invalid Image with id %s' %kwargs['ImageId'])
+            if aws_table.INSTANCE_TYPES[inst_type]['sriov'] is False:
+                logger.warning("AWSDriver: Image %s support SR-IOV but instance type %s does not support HVM",kwargs['ImageId'],inst_type)
+
+        if image_res.virtualization_type == 'paravirtual' and aws_table.INSTANCE_TYPES[inst_type]['paravirt'] is False:  # Need to check virt type str for PV
+            logger.error("AWSDriver: Image %s requires PV support but instance %s does not support PV",kwargs['ImageId'],inst_type)
+            raise AttributeError('Image %s requires PV support but instance %s does not support PV',kwargs['ImageId'],inst_type)
+
+        if image_res.root_device_type == 'instance-store' and aws_table.INSTANCE_TYPES[inst_type]['disk'] ==  0: 
+            logger.error("AWSDriver: Image %s uses instance-store root device type that is not supported by instance type %s",kwargs['ImageId'],inst_type) 
+            raise AttributeError("AWSDriver: Image %s uses instance-store root device type that is not supported by instance type %s",kwargs['ImageId'],inst_type)
+
+
+        # Support of instance type varies across regions and also based on account. So we are not validating it
+        #if inst_type not in aws_table.REGION_DETAILS[self._region]['instance_types']:
+        #    logger.error("AWSDriver: instance type %s not supported in region %s",inst_type,self._region)
+        #    raise AttributeError("AWSDriver: instance type %s not supported in region %s",inst_type,self._region)
+
+        try:
+            instances = self._ec2_resource_handle.create_instances(**kwargs)
+        except Exception as e:
+            logger.error("AWSDriver: Creating instance failed with exception: %s" %(repr(e)))
+            raise  
+        return instances
+
+    def terminate_instance(self,InstanceId):
+        """
+        Terminate an EC2 instance
+           Arguments:
+            - InstanceId (String): ID of EC2 instance
+           Returns: None
+        """ 
+
+        InstanceIds = InstanceId
+        if type(InstanceIds) is not list:
+            InstanceIds = list()
+            InstanceIds.append(InstanceId)
+
+        try:
+            response = self._ec2_client_handle.terminate_instances(InstanceIds=InstanceIds)
+        except Exception as e:
+            logger.error("AWSDriver: Terminate instance failed with exception: %s" %(repr(e)))
+            raise  
+        return response 
+
+    def stop_instance(self,InstanceId):
+        """
+        Stop an EC2 instance. Stop is supported only for EBS backed instance
+           Arguments:
+            - InstanceId (String): ID of EC2 instance
+           Returns: None
+        """ 
+
+        InstanceIds = InstanceId
+        if type(InstanceIds) is not list:
+            InstanceIds = list()
+            InstanceIds.append(InstanceId)
+
+        try:
+            response = self._ec2_client_handle.stop_instances(InstanceIds=InstanceIds)
+        except Exception as e:
+            logger.error("AWSDriver: Stop for instance %s failed with exception: %s",InstanceId,repr(e))
+            raise  
+        return response 
+
+    def start_instance(self,InstanceId):
+        """
+        Start an EC2 instance. Start is supported only for EBS backed instance
+           Arguments:
+            - InstanceId (String): ID of EC2 instance
+           Returns: None
+        """ 
+
+        InstanceIds = InstanceId
+        if type(InstanceIds) is not list:
+            InstanceIds = list()
+            InstanceIds.append(InstanceId)
+
+        try:
+            response = self._ec2_client_handle.start_instances(InstanceIds=InstanceIds)
+        except Exception as e:
+            logger.error("AWSDriver: Start for instance %s failed with exception: %s",InstanceId,repr(e))
+            raise  
+        return response 
+       
+    @property
+    def _get_default_subnet_id_for_az(self):
+        """
+        Get default subnet id for AWS Driver registered Availability Zone 
+          Arguments: None
+          Returns: SubnetId (String)
+        """ 
+
+        if self._availability_zone:
+            subnet = self._get_default_subnet_for_az(self._availability_zone)
+            return subnet.id
+        else:
+            return None
+
+    def _get_default_subnet_for_az(self,AvailabilityZone):
+        """
+        Get default Subnet for Availability Zone
+           Arguments:
+              - AvailabilityZone (String) : EC2 AZ
+           Returns: EC2.Subnet object
+        """
+
+        AvailabilityZones = [AvailabilityZone]
+        try:
+            response = list(self._ec2_resource_handle.subnets.filter(
+                                                              Filters = [
+                                                               {'Name':'availability-zone',
+                                                                 'Values': AvailabilityZones}]))
+        except Exception as e:
+            logger.error("AWSDriver: Get default subnet for Availability zone failed with exception: %s" %(repr(e)))
+            raise
+        default_subnet = [subnet for subnet in response if subnet.default_for_az is True and subnet.vpc_id == self._vpcid]
+        assert(len(default_subnet) == 1)
+        return default_subnet[0]
+        
+    def get_subnet_list(self,VpcId=None):
+        """
+        List all the subnets
+          Arguments:
+           - VpcId (String) - VPC ID to filter the subnet list
+        Returns: List of EC2.Subnet Object
+        """
+
+        try:
+            VpcIds = VpcId
+            if VpcId is not None:
+                if type(VpcIds) is not list:
+                    VpcIds = list()
+                    VpcIds.append(VpcId)
+                response = list(self._ec2_resource_handle.subnets.filter(
+					       Filters = [
+					       { 'Name': 'vpc-id',
+					       'Values': VpcIds}]))
+            else:
+                response = list(self._ec2_resource_handle.subnets.all())
+        except Exception as e:
+            logger.error("AWSDriver: List subnets operation failed with exception: %s" %(repr(e)))
+            raise
+        return response 
+
+    def get_subnet(self,SubnetId):
+        """
+	Get the subnet for specified SubnetId
+          Arguments:
+             - SubnetId (String) - MANDATORY
+          Returns: EC2.Subnet Object
+	"""
+
+        try:
+            response = list(self._ec2_resource_handle.subnets.filter(SubnetIds=[SubnetId]))
+        except botocore.exceptions.ClientError as e:
+           if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                logger.error("AWSDriver: Get Subnet Invalid SubnetID %s",SubnetId)
+                raise exceptions.RWErrorNotFound("AWSDriver: Delete Subnet Invalid SubnetID %s",SubnetId)
+           else:
+               logger.error("AWSDriver: Creating network interface failed with exception: %s",(repr(e)))
+               raise
+        except Exception as e:
+            logger.error("AWSDriver: Get subnet operation failed with exception: %s" %(repr(e)))
+            raise
+        if len(response) == 0:
+            logger.error("AWSDriver: subnet with id %s is not avaialble" %SubnetId)
+            raise exceptions.RWErrorNotFoun("AWSDriver: subnet with id %s is not avaialble" %SubnetId)
+        elif len(response) > 1: 
+            logger.error("AWSDriver: Duplicate subnet with id %s is avaialble" %SubnetId)
+            raise exceptions.RWErrorDuplicate("AWSDriver: Duplicate subnet with id %s is avaialble" %SubnetId)
+        return response[0] 
+
+    def create_subnet(self,**kwargs):
+        """
+        Create a EC2 subnet based on specified CIDR
+          Arguments:
+             - CidrBlock (String): MANDATORY. CIDR for subnet. CIDR should be within VPC CIDR
+             - VpcId (String): VPC ID to create the subnet. Default AZ from AWS Driver registration used if not present. 
+             - AvailabilityZone (String): Availability zone to create subnet. Default AZ from AWS Driver registration used
+                                          if not present
+          Returns: EC2.Subnet Object 
+        """
+
+        if 'CidrBlock' not in kwargs:
+            logger.error("AWSDriver: Insufficent params for create_subnet. CidrBlock is mandatory parameter")
+            raise AttributeError("AWSDriver: Insufficent params for create_subnet. CidrBlock is mandatory parameter")
+
+        if 'VpcId' not in kwargs:
+            kwargs['VpcId'] = self._vpcid
+        if 'AvailabilityZone' not in kwargs and self._availability_zone is not None:
+            kwargs['AvailabilityZone'] = self._availability_zone
+
+        vpc = self._get_vpc_info(kwargs['VpcId'])
+        if not vpc:
+            logger.error("AWSDriver: Subnet creation failed as VpcId %s does not exist", kwargs['VpcId'])
+            raise exceptions.RWErrorNotFound("AWSDriver: Subnet creation failed as VpcId %s does not exist", kwargs['VpcId'])
+        if vpc.state != 'available':
+            logger.error("AWSDriver: Subnet creation failed as VpcId %s is not in available state. Current state is %s", kwargs['VpcId'],vpc.state)
+            raise exceptions.RWErrorNotConnected("AWSDriver: Subnet creation failed as VpcId %s is not in available state. Current state is %s", kwargs['VpcId'],vpc.state)
+        
+        try:
+            subnet = self._ec2_resource_handle.create_subnet(**kwargs)
+        except botocore.exceptions.ClientError as e:
+           if e.response['Error']['Code'] == 'InvalidSubnet.Conflict':
+                logger.error("AWSDriver: Create Subnet for ip %s failed due to overalp with existing subnet in VPC %s",kwargs['CidrBlock'],kwargs['VpcId'])
+                raise exceptions.RWErrorExists("AWSDriver: Create Subnet for ip %s failed due to overalp with existing subnet in VPC %s",kwargs['CidrBlock'],kwargs['VpcId'])
+           elif e.response['Error']['Code'] == 'InvalidSubnet.Range':
+                logger.error("AWSDriver: Create Subnet for ip %s failed as it is not in VPC CIDR range for VPC %s",kwargs['CidrBlock'],kwargs['VpcId'])
+                raise AttributeError("AWSDriver: Create Subnet for ip %s failed as it is not in VPC CIDR range for VPC %s",kwargs['CidrBlock'],kwargs['VpcId'])
+           else:
+               logger.error("AWSDriver: Creating subnet failed with exception: %s",(repr(e)))
+               raise  
+        except Exception as e:
+            logger.error("AWSDriver: Creating subnet failed with exception: %s" %(repr(e)))
+            raise  
+        return subnet
+
+    def modify_subnet(self,SubnetId,MapPublicIpOnLaunch):
+        """
+        Modify a EC2 subnet
+           Arguements: 
+               - SubnetId (String): MANDATORY, EC2 Subnet ID
+               - MapPublicIpOnLaunch (Boolean): Flag to indicate if subnet is associated with public IP 
+        """
+
+        try:
+            response = self._ec2_client_handle.modify_subnet_attribute(SubnetId=SubnetId,MapPublicIpOnLaunch={'Value':MapPublicIpOnLaunch})
+        except botocore.exceptions.ClientError as e:
+           if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                logger.error("AWSDriver: Modify Subnet Invalid SubnetID %s",SubnetId)
+                raise exceptions.RWErrorNotFound("AWSDriver: Modify Subnet Invalid SubnetID %s",SubnetId)
+           else:
+               logger.error("AWSDriver: Modify subnet failed with exception: %s",(repr(e)))
+               raise  
+        except Exception as e:
+            logger.error("AWSDriver: Modify subnet failed with exception: %s",(repr(e)))
+            raise
+
+
+    def delete_subnet(self,SubnetId):
+        """
+        Delete a EC2 subnet
+           Arguements: 
+               - SubnetId (String): MANDATORY, EC2 Subnet ID
+           Returns: None 
+        """
+
+        try:
+            response = self._ec2_client_handle.delete_subnet(SubnetId=SubnetId)
+        except botocore.exceptions.ClientError as e:
+           if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                logger.error("AWSDriver: Delete Subnet Invalid SubnetID %s",SubnetId)
+                raise exceptions.RWErrorNotFound("AWSDriver: Delete Subnet Invalid SubnetID %s",SubnetId)
+           else:
+               logger.error("AWSDriver: Delete subnet failed with exception: %s",(repr(e)))
+               raise  
+        except Exception as e:
+            logger.error("AWSDriver: Delete subnet failed with exception: %s",(repr(e)))
+            raise
+
+    def get_network_interface_list(self,SubnetId=None,VpcId=None,InstanceId = None):
+        """
+        List all the network interfaces
+           Arguments:
+              - SubnetId (String)
+              - VpcId (String)
+              - InstanceId (String)
+           Returns List of EC2.NetworkInterface  
+        """
+
+        try:
+            if InstanceId is not None:
+                InstanceIds = [InstanceId]
+                response = list(self._ec2_resource_handle.network_interfaces.filter(
+					       Filters = [
+					       { 'Name': 'attachment.instance-id',
+                                                 'Values': InstanceIds}]))
+            elif SubnetId is not None:
+                SubnetIds = SubnetId
+                if type(SubnetId) is not list:
+                    SubnetIds = list()
+                    SubnetIds.append(SubnetId)
+                response = list(self._ec2_resource_handle.network_interfaces.filter(
+					       Filters = [
+					       { 'Name': 'subnet-id',
+					       'Values': SubnetIds}]))
+            elif VpcId is not None:
+                VpcIds = VpcId
+                if type(VpcIds) is not list:
+                    VpcIds = list()
+                    VpcIds.append(VpcId)
+                response = list(self._ec2_resource_handle.network_interfaces.filter(
+					       Filters = [
+					       { 'Name': 'vpc-id',
+					       'Values': VpcIds}]))
+            else:
+                response = list(self._ec2_resource_handle.network_interfaces.all())
+        except Exception as e:
+            logger.error("AWSDriver: List network interfaces operation failed with exception: %s" %(repr(e)))
+            raise
+        return response
+
+    def get_network_interface(self,NetworkInterfaceId):
+        """
+	Get the network interface
+          Arguments:
+              NetworkInterfaceId (String): MANDATORY, EC2 Network Interface Id
+         Returns:  EC2.NetworkInterface Object
+	"""
+
+        try:
+            response = list(self._ec2_resource_handle.network_interfaces.filter(NetworkInterfaceIds=[NetworkInterfaceId]))
+        except Exception as e:
+            logger.error("AWSDriver: List Network Interfaces operation failed with exception: %s" %(repr(e)))
+            raise
+        if len(response) == 0:
+            logger.error("AWSDriver: Network interface with id %s is not avaialble" %NetworkInterfaceId)
+            raise exceptions.RWErrorNotFound("AWSDriver: Network interface with id %s is not avaialble" %NetworkInterfaceId)
+        elif len(response) > 1:
+            logger.error("AWSDriver: Duplicate Network interface with id %s is avaialble" %NetworkInterfaceId)
+            raise exceptions.RWErrorDuplicate("AWSDriver: Duplicate Network interface with id %s is avaialble" %NetworkInterfaceId)
+        return response[0] 
+
+    def create_network_interface(self,**kwargs):
+        """
+        Create a network interface in specified subnet 
+          Arguments:
+             - SubnetId (String): MANDATORY, Subnet to create network interface
+          Returns: EC2.NetworkInterface Object
+        """
+
+        if 'SubnetId' not in kwargs:
+            logger.error("AWSDriver: Insufficent params for create_network_inteface . SubnetId is mandatory parameters")
+            raise AttributeError("AWSDriver: Insufficent params for create_network_inteface . SubnetId is mandatory parameters")
+
+        try:
+            interface = self._ec2_resource_handle.create_network_interface(**kwargs)
+        except botocore.exceptions.ClientError as e:
+           if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                logger.error("AWSDriver: Create Network interface failed as subnet %s is not found",kwargs['SubnetId'])
+                raise exceptions.RWErrorNotFound("AWSDriver: Create Network interface failed as subnet %s is not found",kwargs['SubnetId'])
+           else:
+               logger.error("AWSDriver: Creating network interface failed with exception: %s",(repr(e)))
+               raise
+        except Exception as e:
+            logger.error("AWSDriver: Creating network interface failed with exception: %s" %(repr(e)))
+            raise
+        return interface
+
+    def delete_network_interface(self,NetworkInterfaceId):
+        """
+        Delete a network interface
+         Arguments:
+            - NetworkInterfaceId(String): MANDATORY
+         Returns: None
+        """
+        try:
+            response = self._ec2_client_handle.delete_network_interface(NetworkInterfaceId=NetworkInterfaceId)
+        except botocore.exceptions.ClientError as e:
+           if e.response['Error']['Code'] == 'InvalidNetworkInterfaceID.NotFound':
+                logger.error("AWSDriver: Delete Network interface not found for interface ID  %s",NetworkInterfaceId)
+                raise exceptions.RWErrorNotFound("AWSDriver: Delete Network interface not found for interface ID  %s",NetworkInterfaceId)
+           else:
+               logger.error("AWSDriver: Delete network interface failed with exception: %s",(repr(e)))
+               raise  
+        except Exception as e:
+            logger.error("AWSDriver: Delete network interface failed with exception: %s",(repr(e)))
+            raise
+
+    def associate_public_ip_to_network_interface(self,NetworkInterfaceId):
+        """
+        Allocate a Elastic IP and associate to network interface
+          Arguments:
+            NetworkInterfaceId (String): MANDATORY
+          Returns: None
+        """
+        try:
+            response = self._ec2_client_handle.allocate_address(Domain='vpc')
+            self._ec2_client_handle.associate_address(NetworkInterfaceId=NetworkInterfaceId,AllocationId = response['AllocationId'])
+        except Exception as e:
+             logger.error("AWSDriver: Associating Public IP to network interface %s failed with exception: %s",NetworkInterfaceId,(repr(e)))
+             raise
+        return response
+
+    def disassociate_public_ip_from_network_interface(self,NetworkInterfaceId):
+        """
+        Disassociate a Elastic IP from network interface and release the same
+          Arguments:
+            NetworkInterfaceId (String): MANDATORY
+          Returns: None
+        """
+        try:
+            interface = self.get_network_interface(NetworkInterfaceId=NetworkInterfaceId) 
+            if interface  and interface.association and 'AssociationId' in interface.association:
+                self._ec2_client_handle.disassociate_address(AssociationId = interface.association['AssociationId'])
+                self._ec2_client_handle.release_address(AllocationId=interface.association['AllocationId'])
+        except Exception as e:
+             logger.error("AWSDriver: Associating Public IP to network interface %s failed with exception: %s",NetworkInterfaceId,(repr(e)))
+             raise
+
+    def attach_network_interface(self,**kwargs):
+        """
+        Attach network interface to running EC2 instance. Used to add additional interfaces to instance
+          Arguments:
+            - NetworkInterfaceId (String):  MANDATORY,
+            - InstanceId(String) :  MANDATORY
+            - DeviceIndex (Integer): MANDATORY
+          Returns: Dict with AttachmentId which is string
+        """
+
+        if 'NetworkInterfaceId' not in kwargs or 'InstanceId' not in kwargs or 'DeviceIndex' not in kwargs:
+            logger.error('AWSDriver: Attach network interface to instance requires NetworkInterfaceId and InstanceId as mandatory parameters')
+            raise AttributeError('AWSDriver: Attach network interface to instance requires NetworkInterfaceId and InstanceId as mandatory parameters')
+
+        try:
+            response = self._ec2_client_handle.attach_network_interface(**kwargs)
+        except Exception as e:
+            logger.error("AWSDriver: Attach network interface failed with exception: %s",(repr(e)))
+            raise
+        return response
+
+    def detach_network_interface(self,**kwargs):
+        """
+        Detach network interface from instance 
+          Arguments:
+            - AttachmentId (String)
+          Returns: None 
+        """
+
+        if 'AttachmentId' not in kwargs:
+            logger.error('AWSDriver: Detach network interface from instance requires AttachmentId as mandatory parameters')
+            raise AttributeError('AWSDriver: Detach network interface from instance requires AttachmentId as mandatory parameters')
+
+        try:
+            response = self._ec2_client_handle.detach_network_interface(**kwargs)
+        except Exception as e:
+            logger.error("AWSDriver: Detach network interface failed with exception: %s",(repr(e)))
+            raise
+
+    def map_flavor_to_instance_type(self,ram,vcpus,disk,inst_types = None):
+        """
+        Method to find a EC2 instance type matching the requested params
+          Arguments:
+             - ram (Integer) : RAM size in MB
+             - vcpus (Integer): VPCU count
+             - disk (Integer): Storage size in GB
+             - inst_types (List): List of string having list of EC2 instance types to choose from
+                                  assumed to be in order of resource size 
+          Returns
+             InstanceType (String) - EC2 Instance Type
+        """
+        if inst_types is None:
+            inst_types = ['c3.large','c3.xlarge','c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge']
+        
+        for inst in inst_types:
+           if inst in aws_table.INSTANCE_TYPES:
+               if ( aws_table.INSTANCE_TYPES[inst]['ram'] > ram and  
+                    aws_table.INSTANCE_TYPES[inst]['vcpu'] > vcpus and 
+                    aws_table.INSTANCE_TYPES[inst]['disk'] > disk):
+                   return inst
+        return 't2.micro'  
+
    def upload_ssh_key(self,key_name,public_key):
        """
        Method to upload Public Key to AWS
          Arguments:
            - key_name (String): Name for the key pair
            - public_key (String): Base 64 encoded public key
          Returns  None
        """
        # Thin wrapper over EC2 import_key_pair; no error handling here, so
        # any botocore exception propagates unchanged to the caller.
        self._ec2_resource_handle.import_key_pair(KeyName=key_name,PublicKeyMaterial=public_key)
+
    def delete_ssh_key(self,key_name):
        """
        Method to delete Public Key from AWS
          Arguments:
            - key_name (String): Name for the key pair
          Returns  None
        """
        # Thin wrapper over EC2 delete_key_pair; any botocore exception
        # propagates unchanged to the caller.
        self._ec2_client_handle.delete_key_pair(KeyName=key_name)
+ 
+             
diff --git a/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_table.py b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_table.py
new file mode 100644
index 0000000..a7349fd
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_table.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+"""
+Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them.
+From http://aws.amazon.com/ec2/instance-types/
+max_inst From http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2 
+paravirt from https://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
+"""
def _instance_type(type_id, name, ram, vcpu, disk, max_inst, sriov, paravirt,
                   cpu_credits=None):
    """Build one INSTANCE_TYPES entry.

    Arguments:
      type_id     - EC2 instance type name, also used as the dict key
      name        - human-readable description
      ram         - memory in MB (may be float, e.g. 3.75*1024)
      vcpu        - vCPU count
      disk        - instance-store size in GB (0 means EBS-only)
      max_inst    - per-account instance limit from the EC2 FAQ
      sriov       - enhanced networking (SR-IOV) support
      paravirt    - paravirtual AMI support
      cpu_credits - t2-family CPU credit rate; stored under 'extra' when given
    """
    entry = {
        'id': type_id,
        'name': name,
        'ram': ram,
        'vcpu': vcpu,
        'disk': disk,
        'bandwidth': None,   # bandwidth is not published per type
        'max_inst': max_inst,
        'sriov': sriov,
        'paravirt': paravirt,
    }
    if cpu_credits is not None:
        entry['extra'] = {'cpu': cpu_credits}
    return entry


# NOTE: g2.2xlarge vcpu corrected 5 -> 8 and g2.8xlarge vcpu corrected 2 -> 32
# to match the published EC2 specifications.
INSTANCE_TYPES = {
    spec['id']: spec for spec in [
        # id, name, ram (MB), vcpu, disk (GB), max_inst, sriov, paravirt
        _instance_type('m4.large', 'Large Instance', 8*1024, 2, 0, 20, True, False),
        _instance_type('m4.xlarge', 'Large Instance', 16*1024, 4, 0, 20, True, False),
        _instance_type('m4.2xlarge', 'Large Instance', 32*1024, 8, 0, 20, True, False),
        _instance_type('m4.4xlarge', 'Large Instance', 64*1024, 16, 0, 10, True, False),
        _instance_type('m4.10xlarge', 'Large Instance', 160*1024, 40, 0, 5, True, False),
        _instance_type('m3.medium', 'Medium Instance', 3.75*1024, 1, 4, 20, False, True),
        _instance_type('m3.large', 'Large Instance', 7.5*1024, 2, 32, 20, False, True),
        _instance_type('m3.xlarge', 'Extra Large Instance', 15*1024, 4, 80, 20, False, True),
        _instance_type('m3.2xlarge', 'Double Extra Large Instance', 30*1024, 8, 160, 20, False, True),
        _instance_type('g2.2xlarge', 'Cluster GPU G2 Double Extra Large Instance', 15000, 8, 60, 20, False, False),
        _instance_type('g2.8xlarge', 'Cluster GPU G2 Double Extra Large Instance', 60000, 32, 240, 20, False, False),
        # c4 family is EBS-only (disk 0)
        _instance_type('c4.large', 'Compute Optimized Large Instance', 3750, 2, 0, 20, True, False),
        _instance_type('c4.xlarge', 'Compute Optimized Extra Large Instance', 7500, 4, 0, 20, True, False),
        _instance_type('c4.2xlarge', 'Compute Optimized Double Extra Large Instance', 15000, 8, 0, 20, True, False),
        _instance_type('c4.4xlarge', 'Compute Optimized Quadruple Extra Large Instance', 30000, 16, 0, 10, True, False),
        _instance_type('c4.8xlarge', 'Compute Optimized Eight Extra Large Instance', 60000, 36, 0, 5, True, False),
        # c3 instances have 2 SSDs of the specified disk size
        _instance_type('c3.large', 'Compute Optimized Large Instance', 3750, 2, 32, 20, True, True),
        _instance_type('c3.xlarge', 'Compute Optimized Extra Large Instance', 7500, 4, 80, 20, True, True),
        _instance_type('c3.2xlarge', 'Compute Optimized Double Extra Large Instance', 15000, 8, 160, 20, True, True),
        _instance_type('c3.4xlarge', 'Compute Optimized Quadruple Extra Large Instance', 30000, 16, 320, 20, True, True),
        _instance_type('c3.8xlarge', 'Compute Optimized Eight Extra Large Instance', 60000, 32, 640, 20, True, True),
        # i2 instances have up to eight SSD drives
        _instance_type('i2.xlarge', 'High Storage Optimized Extra Large Instance', 31232, 4, 800, 8, True, False),
        _instance_type('i2.2xlarge', 'High Storage Optimized Double Extra Large Instance', 62464, 8, 1600, 8, True, False),
        _instance_type('i2.4xlarge', 'High Storage Optimized Quadruple Large Instance', 124928, 16, 3200, 4, True, False),
        _instance_type('i2.8xlarge', 'High Storage Optimized Eight Extra Large Instance', 249856, 32, 6400, 2, True, False),
        _instance_type('d2.xlarge', 'High Storage Optimized Extra Large Instance', 30050, 4, 6000, 20, True, False),
        _instance_type('d2.2xlarge', 'High Storage Optimized Double Extra Large Instance', 61952, 8, 12000, 20, True, False),
        _instance_type('d2.4xlarge', 'High Storage Optimized Quadruple Extra Large Instance', 122000, 16, 24000, 10, True, False),
        _instance_type('d2.8xlarge', 'High Storage Optimized Eight Extra Large Instance', 244000, 36, 48000, 5, True, False),
        _instance_type('r3.large', 'Memory Optimized Large instance', 15000, 2, 32, 20, True, False),
        _instance_type('r3.xlarge', 'Memory Optimized Extra Large instance', 30500, 4, 80, 20, True, False),
        _instance_type('r3.2xlarge', 'Memory Optimized Double Extra Large instance', 61000, 8, 160, 20, True, False),
        _instance_type('r3.4xlarge', 'Memory Optimized Quadruple Extra Large instance', 122000, 16, 320, 10, True, False),
        _instance_type('r3.8xlarge', 'Memory Optimized Eight Extra Large instance', 244000, 32, 320, 5, True, False),
        # Burstable Performance General Purpose (EBS-only); last arg is the
        # baseline CPU credit rate stored under 'extra'
        _instance_type('t2.micro', 'Burstable Performance Micro Instance', 1024, 1, 0, 20, False, False, 6),
        _instance_type('t2.small', 'Burstable Performance Small Instance', 2048, 1, 0, 20, False, False, 12),
        _instance_type('t2.medium', 'Burstable Performance Medium Instance', 4096, 2, 0, 20, False, False, 24),
        _instance_type('t2.large', 'Burstable Performance Large Instance', 8192, 2, 0, 20, False, False, 36),
    ]
}
+
diff --git a/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/delete_vm.py b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/delete_vm.py
new file mode 100644
index 0000000..05d744b
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/delete_vm.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import rift.rwcal.aws as aws_drv
+import logging
+import argparse
+import rwlogger
+import sys, os, time
+
logging.basicConfig(level=logging.DEBUG)

# Attach the RIFT.ware log handler to the root logger so that all records
# emitted by this script are routed under the rw-cal-log/aws category.
logger = logging.getLogger()
rwlog_handler = rwlogger.RwLogger(category="rw-cal-log",
                                  subcategory="aws",)
logger.addHandler(rwlog_handler)
#logger.setLevel(logging.DEBUG)
+
+        
def cleanup_vm(drv,argument):
    """
    Wait for the instance to finish terminating, then release any Elastic IPs
    bound to the listed ports and delete those network interfaces.

    Arguments:
      drv      - AWSDriver instance
      argument - parsed CLI namespace with server_id and vdu_port_list
    """
    instance = drv.get_instance(argument.server_id)
    logger.info("Waiting for VM instance to get to terminating state")
    instance.wait_until_terminated()
    logger.info("VM inst is now in terminating state")

    for port_id in argument.vdu_port_list:
        logger.info("Deleting network interface with id %s",port_id)
        interface = drv.get_network_interface(port_id)
        if not interface:
            logger.error("Newtork interface with id %s not found when deleting interface",port_id)
            continue
        # Release the public IP first (if one is bound), then the interface.
        if interface.association and 'AssociationId' in interface.association:
            drv.disassociate_public_ip_from_network_interface(NetworkInterfaceId=interface.id)
        drv.delete_network_interface(interface.id)
+
def main():
    """
    Main routine

    Parses AWS credentials/region, the target server id and its port ids from
    the command line, validates them, daemonizes via fork(), and then cleans up
    the terminated instance's network interfaces.
    """
    # NOTE(review): description says 'create' but this script deletes/cleans up
    # resources - confirm and fix the help text.
    parser = argparse.ArgumentParser(description='Script to create AWS resources')
    parser.add_argument('--aws_key',
                        action = "store",
                        dest = "aws_key",
                        type = str,
                        help='AWS Key')

    parser.add_argument('--aws_secret',
                        action = "store",
                        dest = "aws_secret",
                        type = str,
                        help = "AWS Secret")

    parser.add_argument('--aws_region',
                        action = "store",
                        dest = "aws_region",
                        type = str,
                        help = "AWS Region")

    parser.add_argument('--server_id',
                        action = "store",
                        dest = "server_id",
                        type = str,
                        help = "Server ID on which delete operations needs to be performed")

    # May be given multiple times; defaults to an empty list.
    parser.add_argument('--vdu_port_list',
                        action = "append",
                        dest = "vdu_port_list",
                        default = [],
                        help = "Port id list for vdu")

    argument = parser.parse_args()

    # Validate each mandatory argument; exit code 1 signals bad/missing input.
    # NOTE(review): the debug lines below log the AWS key and secret in clear
    # text - confirm this is acceptable for this deployment.
    if not argument.aws_key:
        logger.error("ERROR: AWS key is not configured")
        sys.exit(1)
    else:
        logger.debug("Using AWS key: %s" %(argument.aws_key))

    if not argument.aws_secret:
        logger.error("ERROR: AWS Secret is not configured")
        sys.exit(1)
    else:
        logger.debug("Using AWS Secret: %s" %(argument.aws_secret))

    if not argument.aws_region:
        logger.error("ERROR: AWS Region is not configured")
        sys.exit(1)
    else:
        logger.debug("Using AWS Region: %s" %(argument.aws_region))

    if not argument.server_id:
        logger.error("ERROR: Server ID is not configured")
        sys.exit(1)
    else:
        logger.debug("Using Server ID : %s" %(argument.server_id))

    # Daemonize: the parent exits immediately so the caller is not blocked
    # while the child waits for the instance to finish terminating.
    try:
        pid = os.fork()
        if pid > 0:
            # exit for parent
            sys.exit(0)
    except OSError as e:
        logger.error("fork failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(2)

    # Child process: perform the actual cleanup.
    drv = aws_drv.AWSDriver(key = argument.aws_key,
                            secret  = argument.aws_secret,
                            region  = argument.aws_region)
    cleanup_vm(drv, argument)
    sys.exit(0)

if __name__ == "__main__":
    main()
+        
+
diff --git a/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/exceptions.py b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/exceptions.py
new file mode 100644
index 0000000..3bb3aa7
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/exceptions.py
@@ -0,0 +1,54 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+#
+# Rift Exceptions:
+#   These exceptions each correspond with a rift status as they are defined
+# in rwtypes.vala.  Adding them here so that errors from C transitioning
+# back to python can be handled in a pythonic manner rather than having to
+# inspect return values.
+
+class RWErrorFailure(Exception):
+  pass
+
+class RWErrorDuplicate(Exception):
+  pass
+
+class RWErrorNotFound(Exception):
+  pass
+
+class RWErrorOutOfBounds(Exception):
+  pass
+
+class RWErrorBackpressure(Exception):
+  pass
+
+class RWErrorTimeout(Exception):
+  pass
+
+class RWErrorExists(Exception):
+  pass
+
+class RWErrorNotEmpty(Exception):
+  pass
+
+class RWErrorNotConnected(Exception):
+  pass
+
+class RWErrorNotSupported(Exception):
+  pass
+
diff --git a/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/prepare_vm.py b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/prepare_vm.py
new file mode 100644
index 0000000..e0ae55a
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/prepare_vm.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import rift.rwcal.aws as aws_drv
+import logging
+import argparse
+import rwlogger
+import sys, os, time
+
+logging.basicConfig(level=logging.DEBUG)
+
+logger = logging.getLogger()
+rwlog_handler = rwlogger.RwLogger(category="rw-cal-log",
+                                  subcategory="aws",)
+logger.addHandler(rwlog_handler)
+#logger.setLevel(logging.DEBUG)
+
+
+        
+def prepare_vm_after_boot(drv,argument):
+    vm_inst = drv.get_instance(argument.server_id)
+    logger.info("Waiting for VM instance to get to running state")
+    vm_inst.wait_until_running()
+    logger.info("VM instance is now in running state") 
+    if argument.vdu_name:
+        vm_inst.create_tags(Tags=[{'Key': 'Name','Value':argument.vdu_name}])
+    if argument.vdu_node_id is not None:
+        vm_inst.create_tags(Tags=[{'Key':'node_id','Value':argument.vdu_node_id}])    
+    
+    for index,port_id in enumerate(argument.vdu_port_list):
+        logger.info("Attaching network interface with id %s to VDU instance %s",port_id,vm_inst.id)
+        drv.attach_network_interface(NetworkInterfaceId = port_id,InstanceId = vm_inst.id,DeviceIndex=index+1)
+    
+
+def main():
+    """
+    Main routine
+    """
+    parser = argparse.ArgumentParser(description='Script to create AWS resources')
+    parser.add_argument('--aws_key',
+                        action = "store",
+                        dest = "aws_key",
+                        type = str,
+                        help='AWS Key')
+
+    parser.add_argument('--aws_secret',
+                        action = "store",
+                        dest = "aws_secret",
+                        type = str,
+                        help = "AWS Secret")
+
+    parser.add_argument('--aws_region',
+                        action = "store",
+                        dest = "aws_region",
+                        type = str,
+                        help = "AWS Region")
+
+    parser.add_argument('--server_id',
+                        action = "store",
+                        dest = "server_id",
+                        type = str,
+                        help = "Server ID on which boot operations needs to be performed")
+    
+    parser.add_argument('--vdu_name',
+                        action = "store",
+                        dest = "vdu_name",
+                        type = str,
+                        help = "VDU name")
+
+    parser.add_argument('--vdu_node_id',
+                        action = "store",
+                        dest = "vdu_node_id",
+                        help = "Node id for vdu")
+
+    parser.add_argument('--vdu_port_list',
+                        action = "append",
+                        dest = "vdu_port_list",
+                        default = [],
+                        help = "Port id list for vdu")
+
+    argument = parser.parse_args()
+
+    if not argument.aws_key:
+        logger.error("ERROR: AWS key is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS key: %s" %(argument.aws_key))
+
+    if not argument.aws_secret:
+        logger.error("ERROR: AWS Secret is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS Secret: %s" %(argument.aws_secret))
+
+    if not argument.aws_region:
+        logger.error("ERROR: AWS Region is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS Region: %s" %(argument.aws_region))
+
+    if not argument.server_id:
+        logger.error("ERROR: Server ID is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using Server ID : %s" %(argument.server_id))
+        
+    try:
+        pid = os.fork()
+        if pid > 0:
+            # exit for parent
+            sys.exit(0)
+    except OSError as e:
+        logger.error("fork failed: %d (%s)\n" % (e.errno, e.strerror))
+        sys.exit(2)
+        
+    drv = aws_drv.AWSDriver(key = argument.aws_key,
+                            secret  = argument.aws_secret,
+                            region  = argument.aws_region)
+    prepare_vm_after_boot(drv, argument)
+    sys.exit(0)
+    
+if __name__ == "__main__":
+    main()
+        
+
diff --git a/rwcal/plugins/vala/rwcal_aws/rwcal_aws.py b/rwcal/plugins/vala/rwcal_aws/rwcal_aws.py
new file mode 100644
index 0000000..4f212d7
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_aws/rwcal_aws.py
@@ -0,0 +1,1111 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import time
+import os
+import subprocess
+import logging
+import rift.rwcal.aws as aws_drv
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+import rift.rwcal.aws.exceptions as exceptions
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+
+PREPARE_VM_CMD = "prepare_vm.py --aws_key {key} --aws_secret {secret} --aws_region {region} --server_id {server_id}"
+DELETE_VM_CMD =  "delete_vm.py --aws_key {key} --aws_secret {secret} --aws_region {region} --server_id {server_id}"
+
+rwstatus_exception_map = {IndexError: RwTypes.RwStatus.NOTFOUND,
+                          KeyError: RwTypes.RwStatus.NOTFOUND,
+                          NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,
+                          AttributeError: RwTypes.RwStatus.FAILURE,
+                          exceptions.RWErrorNotFound: RwTypes.RwStatus.NOTFOUND,
+                          exceptions.RWErrorDuplicate: RwTypes.RwStatus.DUPLICATE,
+                          exceptions.RWErrorExists: RwTypes.RwStatus.EXISTS,
+                          exceptions.RWErrorNotConnected: RwTypes.RwStatus.NOTCONNECTED,
+                          }
+
+rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
+rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
+
+class RwcalAWSPlugin(GObject.Object, RwCal.Cloud):
+    """This class implements the CAL VALA methods for AWS."""
+
+    flavor_id = 1;
+    instance_num = 1
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._driver_class = aws_drv.AWSDriver
+        self._flavor_list = []
+        self.log = logging.getLogger('rwcal.aws.%s' % RwcalAWSPlugin.instance_num)
+        self.log.setLevel(logging.DEBUG)
+
+        RwcalAWSPlugin.instance_num += 1
+
+    def _get_driver(self, account):
+        return self._driver_class(key     = account.aws.key,
+                                  secret  = account.aws.secret,
+                                  region  = account.aws.region,
+                                  ssh_key = account.aws.ssh_key,
+                                  vpcid   = account.aws.vpcid,
+                                  availability_zone = account.aws.availability_zone,
+                                  default_subnet_id = account.aws.default_subnet_id)
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        self.log.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="aws",
+                    log_hdl=rwlog_ctx,
+                    )
+                )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        Performs an access to the resources using underlying API. If creds
+        are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus(
+                status="success",
+                details="AWS Cloud Account validation not implemented yet"
+                )
+
+        return status
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_management_network(self, account):
+        """
+        Returns the management network associated with the specified account.
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            The management network
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_tenant(self, account, name):
+        """Create a new tenant.
+
+        Arguments:
+            account - a cloud account
+            name - name of the tenant
+
+        Returns:
+            The tenant id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """delete a tenant.
+
+        Arguments:
+            account - a cloud account
+            tenant_id - id of the tenant
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """List tenants.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of tenants
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_role(self, account, name):
+        """Create a new user.
+
+        Arguments:
+            account - a cloud account
+            name - name of the user
+
+        Returns:
+            The user id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """Delete a user.
+
+        Arguments:
+            account - a cloud account
+            role_id - id of the user
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """List roles.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of roles
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_image(self, account, image):
+        """Create an image
+
+        Arguments:
+            account - a cloud account
+            image - a description of the image to create
+
+        Returns:
+            The image id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """Delete a vm image.
+
+        Arguments:
+            account - a cloud account
+            image_id - id of the image to delete
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def _fill_image_info(img_info):
+        """Create a GI object from image info dictionary
+
+        Converts image information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            account - a cloud account
+            img_info - image information dictionary object from AWS
+
+        Returns:
+            The ImageInfoItem
+        """
+        img = RwcalYang.ImageInfoItem()
+        img.name = img_info.name
+        img.id   = img_info.id
+
+        #tag_fields = ['checksum']
+        # Look for any properties
+        if img_info.tags:
+            for tag in img_info.tags:
+                if tag['Key'] == 'checksum':
+                    setattr(img, tag['Key'], tag['Value'])
+        img.disk_format  = 'ami'
+        if img_info.state == 'available':
+            img.state = 'active'
+        else:
+            img.state = 'inactive'
+        return img
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """Return a list of the names of all available images.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            The the list of images in VimResources object
+        """
+        response = RwcalYang.VimResources()
+        images = self._get_driver(account).list_images()
+        for img in images:
+            response.imageinfo_list.append(RwcalAWSPlugin._fill_image_info(img))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        """Return a image information.
+
+        Arguments:
+            account - a cloud account
+            image_id - an id of the image
+
+        Returns:
+            ImageInfoItem object containing image information.
+        """
+        image = self._get_driver(account).get_image(image_id)
+        return RwcalAWSPlugin._fill_image_info(image)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vm(self, account, vminfo):
+        """Create a new virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vminfo - information that defines the type of VM to create
+
+        Returns:
+            The image id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """Start an existing virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """Stop a running virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """Delete a virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """Reboot a virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def _fill_vm_info(vm_info):
+        """Create a GI object from vm info dictionary
+
+        Converts VM information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            vm_info - VM information from AWS
+
+        Returns:
+            Protobuf Gi object for VM
+        """
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_id     = vm_info.id
+        vm.image_id  = vm_info.image_id
+        vm.flavor_id = vm_info.instance_type
+        if vm_info.state['Name'] == 'running':
+            vm.state = 'active'
+        else:
+            vm.state = 'inactive'
+        for network_intf in vm_info.network_interfaces:
+            if 'Attachment' in network_intf and network_intf['Attachment']['DeviceIndex'] == 0:
+                if 'Association' in network_intf and 'PublicIp' in network_intf['Association']:
+                    vm.public_ip = network_intf['Association']['PublicIp']
+                vm.management_ip = network_intf['PrivateIpAddress']
+            else:
+                addr = vm.private_ip_list.add()
+                addr.ip_address = network_intf['PrivateIpAddress']
+                if 'Association' in network_intf and 'PublicIp' in network_intf['Association']:
+                    addr = vm.public_ip_list.add()
+                    addr.ip_address = network_intf['Association']['PublicIp']
+
+        if vm_info.placement and 'AvailabilityZone' in vm_info.placement:
+            vm.availability_zone = vm_info.placement['AvailabilityZone']
+        if vm_info.tags:
+            for tag in vm_info.tags:
+                if tag['Key'] == 'Name':
+                    vm.vm_name   = tag['Value']
+                elif tag['Key'] in vm.user_tags.fields:
+                    setattr(vm.user_tags,tag['Key'],tag['Value'])
+        return vm
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        """Return a list of the VMs as vala boxed objects
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List containing VM information
+        """
+        response = RwcalYang.VimResources()
+        vms = self._get_driver(account).list_instances()
+        for vm in vms:
+            response.vminfo_list.append(RwcalAWSPlugin._fill_vm_info(vm))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vm(self, account, id):
+        """Return vm information.
+
+        Arguments:
+            account - a cloud account
+            id - an id for the VM
+
+        Returns:
+            VM information
+        """
+        vm = self._get_driver(account).get_instance(id)
+        return RwcalAWSPlugin._fill_vm_info(vm)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_flavor(self, account, flavor):
+        """Create new flavor.
+           AWS has a fixed set of EC2 instance types, so we map the flavor to
+           an existing instance type and create a local flavor for the same.
+
+        Arguments:
+            account - a cloud account
+            flavor - flavor of the VM
+
+        Returns:
+            flavor id (with EC2 instance type included in id)
+        """
+        drv = self._get_driver(account)
+        inst_type = drv.map_flavor_to_instance_type(ram       = flavor.vm_flavor.memory_mb,
+			         vcpus     = flavor.vm_flavor.vcpu_count,
+			         disk      = flavor.vm_flavor.storage_gb)
+
+        new_flavor = RwcalYang.FlavorInfoItem()
+        new_flavor.name = flavor.name
+        new_flavor.vm_flavor.memory_mb = flavor.vm_flavor.memory_mb
+        new_flavor.vm_flavor.vcpu_count = flavor.vm_flavor.vcpu_count
+        new_flavor.vm_flavor.storage_gb = flavor.vm_flavor.storage_gb
+        new_flavor.id = inst_type + '-' + str(RwcalAWSPlugin.flavor_id)
+        RwcalAWSPlugin.flavor_id = RwcalAWSPlugin.flavor_id+1
+        self._flavor_list.append(new_flavor)
+        return new_flavor.id
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """Delete flavor.
+
+        Arguments:
+            account - a cloud account
+            flavor_id - id flavor of the VM
+        """
+
+        flavor = [flav for flav in self._flavor_list if flav.id == flavor_id]
+        self._flavor_list.delete(flavor[0])
+
+    @staticmethod
+    def _fill_flavor_info(flavor_info):
+        """Create a GI object from flavor info dictionary
+
+        Converts a locally cached flavor information object
+        into a Protobuf Gi Object
+
+        Arguments:
+            flavor_info: Flavor information from the locally maintained flavor list
+
+        Returns:
+             Object of class FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name                       = flavor_info.name
+        flavor.id                         = flavor_info.id
+        flavor.vm_flavor.memory_mb = flavor_info.vm_flavor.memory_mb
+        flavor.vm_flavor.vcpu_count = flavor_info.vm_flavor.vcpu_count
+        flavor.vm_flavor.storage_gb = flavor_info.vm_flavor.storage_gb
+        return flavor
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """Return flavor information.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of flavors
+        """
+        response = RwcalYang.VimResources()
+        for flv in self._flavor_list:
+            response.flavorinfo_list.append(RwcalAWSPlugin._fill_flavor_info(flv))
+        return response
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, id):
+        """Return flavor information.
+
+        Arguments:
+            account - a cloud account
+            id - an id for the flavor
+
+        Returns:
+            Flavor info item
+        """
+        flavor = [flav for flav in self._flavor_list if flav.id == id]
+        return (RwcalAWSPlugin._fill_flavor_info(flavor[0]))
+
+    def _fill_network_info(self, network_info, account):
+        """Create a GI object from network info dictionary
+
+        Converts Network information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            network_info - Network information from AWS
+            account - a cloud account
+
+        Returns:
+            Network info item
+        """
+        network                  = RwcalYang.NetworkInfoItem()
+        network.network_id       = network_info.subnet_id
+        network.subnet           = network_info.cidr_block
+        if network_info.tags:
+            for tag in network_info.tags:
+                if tag['Key'] == 'Name':
+                    network.network_name   = tag['Value']
+        return network
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        """Return a list of networks
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of networks
+        """
+        response = RwcalYang.VimResources()
+        networks = self._get_driver(account).get_subnet_list()
+        for network in networks:
+            response.networkinfo_list.append(self._fill_network_info(network, account))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, id):
+        """Return a network
+
+        Arguments:
+            account - a cloud account
+            id - an id for the network
+
+        Returns:
+            Network info item
+        """
+        network = self._get_driver(account).get_subnet(id)
+        return self._fill_network_info(network, account)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_network(self, account, network):
+        """Create a new network
+
+        Arguments:
+            account - a cloud account
+            network - Network object
+
+        Returns:
+            Network id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        """Delete a network
+
+        Arguments:
+            account - a cloud account
+            network_id - an id for the network
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def _fill_port_info(port_info):
+        """Create a GI object from port info dictionary
+
+        Converts Port information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            port_info - Port/Network interface information from AWS
+
+        Returns:
+            Port info item
+        """
+        port = RwcalYang.PortInfoItem()
+
+        port.port_id    = port_info.id
+        port.network_id = port_info.subnet_id
+        if port_info.attachment and 'InstanceId' in port_info.attachment:
+            port.vm_id = port_info.attachment['InstanceId']
+        port.ip_address = port_info.private_ip_address
+        if port_info.status == 'in-use':
+            port.port_state = 'active'
+        elif port_info.status == 'available':
+            port.port_state = 'inactive'
+        else:
+            port.port_state = 'unknown'
+        if port_info.tag_set:
+            for tag in port_info.tag_set:
+                if tag['Key'] == 'Name':
+                    port.port_name   = tag['Value']
+        return port
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        """Return a port
+
+        Arguments:
+            account - a cloud account
+            port_id - an id for the port
+
+        Returns:
+            Port info item
+        """
+        port = self._get_driver(account).get_network_interface(port_id)
+        return RwcalAWSPlugin._fill_port_info(port)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        """Return a list of ports
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            Port info list
+        """
+        response = RwcalYang.VimResources()
+        ports = self._get_driver(account).get_network_interface_list()
+        for port in ports:
+            response.portinfo_list.append(RwcalAWSPlugin._fill_port_info(port))
+        return response
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_port(self, account, port):
+        """Create a new port
+
+        Arguments:
+            account - a cloud account
+            port - port object
+
+        Returns:
+            Port id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        """Delete a port
+
+        Arguments:
+            account - a cloud account
+            port_id - an id for port
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_add_host(self, account, host):
+        """Add a new host
+
+        Arguments:
+            account - a cloud account
+            host - a host object
+
+        Returns:
+            An id for the host
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        """Remove a host
+
+        Arguments:
+            account - a cloud account
+            host_id - an id for the host
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        """Return a host
+
+        Arguments:
+            account - a cloud account
+            host_id - an id for host
+
+        Returns:
+            Host info item
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        """Return a list of hosts
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of hosts
+        """
+        raise NotImplementedError
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the type of VDU to create
+
+        Returns:
+            The vdu_id
+        """
+        drv = self._get_driver(account)
+        kwargs = {}
+        kwargs['CidrBlock'] = link_params.subnet
+
+        subnet =  drv.create_subnet(**kwargs)
+        if link_params.name:
+            subnet.create_tags(Tags=[{'Key': 'Name','Value':link_params.name}])
+        if link_params.associate_public_ip:
+              drv.modify_subnet(SubnetId=subnet.id,MapPublicIpOnLaunch=link_params.associate_public_ip)
+        return subnet.id
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        """Delete a virtual link
+
+        Arguments:
+            account - a cloud account
+            link_id - id for the virtual-link to be deleted
+
+        Returns:
+            None
+        """
+        drv = self._get_driver(account)
+        port_list = drv.get_network_interface_list(SubnetId=link_id)
+        for port in port_list:
+            if port  and port.association and 'AssociationId' in port.association:
+                drv.disassociate_public_ip_from_network_interface(NetworkInterfaceId=port.id)
+            if port and port.attachment and 'AttachmentId' in port.attachment:
+                drv.detach_network_interface(AttachmentId = port.attachment['AttachmentId'],Force=True) #force detach as otherwise delete fails
+                # detaching the instance takes time, so poll until the port is no longer in-use
+                port = drv.get_network_interface(NetworkInterfaceId=port.id)
+                retries = 0
+                while port.status == 'in-use' and retries < 10:
+                    time.sleep(5)
+                    port = drv.get_network_interface(NetworkInterfaceId=port.id)
+            drv.delete_network_interface(NetworkInterfaceId=port.id)
+        drv.delete_subnet(link_id)
+
+    @staticmethod
+    def _fill_connection_point_info(c_point, port_info):
+        """Create a GI object for RwcalYang.VDUInfoParams_ConnectionPoints()
+
+        Converts EC2.NetworkInterface object returned by AWS driver into
+        Protobuf Gi Object
+
+        Arguments:
+            port_info - Network Interface information from AWS
+        Returns:
+            Protobuf Gi object for RwcalYang.VDUInfoParams_ConnectionPoints
+        """
+        c_point.virtual_link_id = port_info.subnet_id
+        c_point.connection_point_id = port_info.id
+        if port_info.attachment:
+            c_point.vdu_id = port_info.attachment['InstanceId']
+        c_point.ip_address = port_info.private_ip_address
+        if port_info.association and 'PublicIp' in port_info.association:
+                c_point.public_ip = port_info.association['PublicIp']
+        if port_info.tag_set:
+            for tag in port_info.tag_set:
+                if tag['Key'] == 'Name':
+                    c_point.name   = tag['Value']
+        if port_info.status == 'in-use':
+            c_point.state = 'active'
+        elif port_info.status == 'available':
+            c_point.state = 'inactive'
+        else:
+            c_point.state = 'unknown'
+
+    @staticmethod
+    def _fill_virtual_link_info(network_info, port_list):
+        """Create a GI object for VirtualLinkInfoParams
+
+        Converts Subnet and NetworkInterface objects
+        returned by the AWS driver into a Protobuf Gi Object
+
+        Arguments:
+            network_info - Subnet information from AWS
+            port_list - A list of network interface information from AWS
+        Returns:
+            Protobuf Gi object for VirtualLinkInfoParams
+        """
+        link = RwcalYang.VirtualLinkInfoParams()
+        # Subnet state 'available' maps to an active link; anything else inactive
+        if network_info.state == 'available':
+            link.state = 'active'
+        else:
+            link.state = 'inactive'
+        link.virtual_link_id = network_info.subnet_id
+        link.subnet = network_info.cidr_block
+        # Link name is carried in the AWS 'Name' tag, when present
+        if network_info.tags:
+            for tag in network_info.tags:
+                if tag['Key'] == 'Name':
+                    link.name   = tag['Value']
+        # One connection point per network interface on the subnet
+        for port in port_list:
+            c_point = link.connection_points.add()
+            RwcalAWSPlugin._fill_connection_point_info(c_point, port)
+
+        return link
+
+    @staticmethod
+    def _fill_vdu_info(vm_info, port_list):
+        """Create a GI object for VDUInfoParams
+
+        Converts VM information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            vm_info - EC2 instance information from AWS
+            port_list - A list of network interface information from AWS;
+                must contain exactly one interface at device index 0 (the
+                management port)
+        Returns:
+            Protobuf Gi object for VDUInfoParams
+        """
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.vdu_id = vm_info.id
+        # The interface attached at device index 0 is the management port
+        mgmt_port = [port for port in port_list if port.attachment and port.attachment['DeviceIndex'] == 0]
+        assert(len(mgmt_port) == 1)
+        vdu.management_ip = mgmt_port[0].private_ip_address
+        if mgmt_port[0].association and 'PublicIp' in mgmt_port[0].association:
+            vdu.public_ip = mgmt_port[0].association['PublicIp']
+            #For now set management ip also to public ip
+            #vdu.management_ip = vdu.public_ip
+        # Name and node_id are carried as AWS tags on the instance
+        if vm_info.tags:
+            for tag in vm_info.tags:
+                if tag['Key'] == 'Name':
+                    vdu.name   = tag['Value']
+                elif tag['Key'] == 'node_id':
+                    vdu.node_id = tag['Value']
+        vdu.image_id = vm_info.image_id
+        vdu.flavor_id = vm_info.instance_type
+        if vm_info.state['Name'] == 'running':
+            vdu.state = 'active'
+        else:
+            vdu.state = 'inactive'
+        #if vm_info.placement and 'AvailabilityZone' in vm_info.placement:
+        #    vdu.availability_zone = vm_info.placement['AvailabilityZone']
+        # Fill the port information (all non-management interfaces)
+        cp_port_list = [port for port in port_list if port.attachment and port.attachment['DeviceIndex'] != 0]
+
+        for port in cp_port_list:
+            c_point = vdu.connection_points.add()
+            RwcalAWSPlugin._fill_connection_point_info(c_point, port)
+        return vdu
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        """Get information about virtual link.
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+
+        Returns:
+            Object of type RwcalYang.VirtualLinkInfoParams
+        """
+        drv = self._get_driver(account)
+        network = drv.get_subnet(SubnetId=link_id)
+        port_list = drv.get_network_interface_list(SubnetId=link_id)
+        virtual_link = RwcalAWSPlugin._fill_virtual_link_info(network, port_list)
+        return virtual_link
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_virtual_link_list(self, account):
+        """Get information about all the virtual links
+
+        Arguments:
+            account  - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VirtualLinkInfoParams
+        """
+        vnf_resources = RwcalYang.VNFResources()
+        drv = self._get_driver(account)
+        networks = drv.get_subnet_list()
+        for network in networks:
+            port_list = drv.get_network_interface_list(SubnetId=network.id)
+            virtual_link = RwcalAWSPlugin._fill_virtual_link_info(network, port_list)
+            vnf_resources.virtual_link_info_list.append(virtual_link)
+        return vnf_resources
+
+    def _create_connection_point(self, account, c_point):
+        """
+        Create a connection point
+        Arguments:
+           account  - a cloud account
+           c_point  - connection_points
+        """
+        drv = self._get_driver(account)
+        port     = drv.create_network_interface(SubnetId=c_point.virtual_link_id)
+        if c_point.name:
+            port.create_tags(Tags=[{'Key': 'Name','Value':c_point.name}])
+        if c_point.associate_public_ip:
+                drv.associate_public_ip_to_network_interface(NetworkInterfaceId = port.id)
+        return port
+
+    def prepare_vdu_on_boot(self, account, server_id,vdu_init_params,vdu_port_list = None):
+        """Run the external prepare-VM helper for a newly launched instance.
+
+        Builds a command line from PREPARE_VM_CMD using the account
+        credentials/region and the VDU init parameters, then executes it
+        through the shell. Per the caller (do_create_vdu), the helper waits
+        for the instance to reach running state, applies name/node_id tags
+        and attaches the listed ports.
+
+        Arguments:
+            account         - a cloud account
+            server_id       - EC2 instance id of the VDU
+            vdu_init_params - RwcalYang.VDUInitParams for the VDU
+            vdu_port_list   - optional list of network interface ids to attach
+
+        NOTE(review): the AWS key/secret are passed on the command line of a
+        shell=True subprocess and are therefore visible in the process list --
+        confirm this is acceptable for the deployment environment.
+        """
+        cmd = PREPARE_VM_CMD.format(key     = account.aws.key,
+                                  secret  = account.aws.secret,
+                                  region  = account.aws.region,
+                                  server_id = server_id)
+        if vdu_init_params.has_field('name'):
+            cmd += (" --vdu_name "+ vdu_init_params.name)
+        if vdu_init_params.has_field('node_id'):
+            cmd += (" --vdu_node_id "+ vdu_init_params.node_id)
+        if vdu_port_list is not None:
+            for port_id in vdu_port_list:
+                cmd += (" --vdu_port_list "+ port_id)
+
+        # PREPARE_VM_CMD is expected to begin with a script name that lives in
+        # the aws driver directory -- TODO confirm (template not in view)
+        exec_path = 'python3 ' + os.path.dirname(aws_drv.__file__)
+        exec_cmd = exec_path+'/'+cmd
+        self.log.info("Running command: %s" %(exec_cmd))
+        subprocess.call(exec_cmd, shell=True)
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        """Create a new virtual deployment unit
+
+        Arguments:
+            account     - a cloud account
+            vdu_init  - information about VDU to create (RwcalYang.VDUInitParams)
+
+        Returns:
+            The vdu_id
+        """
+        drv = self._get_driver(account)
+        ### First create required number of ports aka connection points
+        port_list = []
+
+        ### Now Create VM
+        kwargs = {}
+        kwargs['ImageId'] = vdu_init.image_id
+        if vdu_init.has_field('flavor_id'):
+            #Get instance type from flavor id which is of form c3.xlarge-1
+            inst_type =  vdu_init.flavor_id.split('-')[0]
+        else:
+            inst_type = drv.map_flavor_to_instance_type(ram       = vdu_init.vm_flavor.memory_mb,
+			         vcpus     = vdu_init.vm_flavor.vcpu_count,
+			         disk      = vdu_init.vm_flavor.storage_gb)
+
+        kwargs['InstanceType'] = inst_type
+        if vdu_init.vdu_init and vdu_init.vdu_init.userdata:
+            kwargs['UserData'] = vdu_init.vdu_init.userdata
+
+        #If we need to allocate public IP address create network interface and associate elastic
+        #ip  to interface
+        if vdu_init.allocate_public_address:
+           port_id     = drv.create_network_interface(SubnetId=drv.default_subnet_id)
+           drv.associate_public_ip_to_network_interface(NetworkInterfaceId = port_id.id)
+           network_interface  = {'NetworkInterfaceId':port_id.id,'DeviceIndex':0}
+           kwargs['NetworkInterfaces'] = [network_interface]
+
+        #AWS Driver will use default subnet id to create first network interface
+        # if network interface is not specified and will also have associate public ip
+        # if enabled for the subnet
+        vm_inst = drv.create_instance(**kwargs)
+
+        # Wait for instance to get to running state before attaching network interface
+        # to instance
+        #vm_inst[0].wait_until_running()
+
+        #if vdu_init.name:
+            #vm_inst[0].create_tags(Tags=[{'Key': 'Name','Value':vdu_init.name}])
+        #if vdu_init.node_id is not None:
+            #vm_inst[0].create_tags(Tags=[{'Key':'node_id','Value':vdu_init.node_id}])
+
+        # Create the connection points
+        port_list = []
+        for index,c_point in enumerate(vdu_init.connection_points):
+            port_id = self._create_connection_point(account, c_point)
+            port_list.append(port_id.id)
+            #drv.attach_network_interface(NetworkInterfaceId = port_id.id,InstanceId = vm_inst[0].id,DeviceIndex=index+1)
+
+        # We wait for instance to get to running state and update name,node_id and attach network intfs
+        self.prepare_vdu_on_boot(account, vm_inst[0].id, vdu_init, port_list)
+
+        return vm_inst[0].id
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Modify Properties of existing virtual deployment unit
+
+        Arguments:
+            account     -  a cloud account
+            vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
+        """
+        ### First create required number of ports aka connection points
+        drv = self._get_driver(account)
+        port_list = []
+
+        vm_inst = drv.get_instance(vdu_modify.vdu_id)
+
+        if vm_inst.state['Name'] != 'running':
+            self.log.error("RWCAL-AWS: VM with id %s is not in running state during modify VDU",vdu_modify.vdu_id)
+            raise exceptions.RWErrorFailure("RWCAL-AWS: VM with id %s is not in running state during modify VDU",vdu_modify.vdu_id)
+
+        port_list = drv.get_network_interface_list(InstanceId = vdu_modify.vdu_id)
+        used_device_indexs = [port.attachment['DeviceIndex'] for port in port_list if port.attachment]
+
+        device_index = 1
+        for c_point in vdu_modify.connection_points_add:
+            #Get unused device index
+            while device_index in used_device_indexs:
+                device_index = device_index+1
+            port_id = self._create_connection_point(account, c_point)
+            drv.attach_network_interface(NetworkInterfaceId = port_id.id,InstanceId = vdu_modify.vdu_id,DeviceIndex =device_index)
+
+        ### Detach the requested connection_points
+        for c_point in vdu_modify.connection_points_remove:
+            port = drv.get_network_interface(NetworkInterfaceId=c_point.connection_point_id)
+            #Check if elastic IP is associated with interface and release it
+            if port  and port.association and 'AssociationId' in port.association:
+                drv.disassociate_public_ip_from_network_interface(NetworkInterfaceId=port.id)
+            if port and port.attachment and port.attachment['DeviceIndex'] != 0:
+                drv.detach_network_interface(AttachmentId = port.attachment['AttachmentId'],Force=True) #force detach as otherwise delete fails
+            else:
+                self.log.error("RWCAL-AWS: Cannot modify connection port at index 0")
+
+        # Delete the connection points. Interfaces take time to get detached from instance and so
+        # we check status before doing delete network interface
+        for c_point in vdu_modify.connection_points_remove:
+            port = drv.get_network_interface(NetworkInterfaceId=c_point.connection_point_id)
+            retries = 0
+            if port and port.attachment and port.attachment['DeviceIndex'] == 0:
+                self.log.error("RWCAL-AWS: Cannot modify connection port at index 0")
+                continue
+            while port.status == 'in-use' and retries < 10:
+                time.sleep(5)
+                port = drv.get_network_interface(NetworkInterfaceId=c_point.connection_point_id)
+            drv.delete_network_interface(port.id)
+
+    def cleanup_vdu_on_term(self, account, server_id,vdu_port_list = None):
+        """Run the external delete-VM helper after instance termination.
+
+        Builds a command line from DELETE_VM_CMD with the account
+        credentials/region and the ports that must be deleted explicitly,
+        then executes it through the shell.
+
+        Arguments:
+            account       - a cloud account
+            server_id     - EC2 instance id of the terminated VDU
+            vdu_port_list - optional list of network interface ids to delete
+
+        NOTE(review): as in prepare_vdu_on_boot, the AWS key/secret appear on
+        the command line of a shell=True subprocess -- confirm acceptable.
+        """
+        cmd = DELETE_VM_CMD.format(key    = account.aws.key,
+                                  secret  = account.aws.secret,
+                                  region  = account.aws.region,
+                                  server_id = server_id)
+        if vdu_port_list is not None:
+            for port_id in vdu_port_list:
+                cmd += (" --vdu_port_list "+ port_id)
+
+        # DELETE_VM_CMD is expected to begin with a script name located in
+        # the aws driver directory -- TODO confirm (template not in view)
+        exec_path = 'python3 ' + os.path.dirname(aws_drv.__file__)
+        exec_cmd = exec_path+'/'+cmd
+        self.log.info("Running command: %s" %(exec_cmd))
+        subprocess.call(exec_cmd, shell=True)
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        """Delete a virtual deployment unit
+
+        Terminates the EC2 instance and hands any interfaces that AWS will
+        not auto-delete on termination to the external cleanup helper.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu to be deleted
+
+        Returns:
+            None
+        """
+        drv = self._get_driver(account)
+        ### Get list of port on VM and delete them.
+        #vm_inst = drv.get_instance(vdu_id)
+
+        # Ports with DeleteOnTermination False survive termination and must
+        # be removed explicitly by cleanup_vdu_on_term
+        port_list = drv.get_network_interface_list(InstanceId = vdu_id)
+        delete_port_list = [port.id for port in port_list if port.attachment and port.attachment['DeleteOnTermination'] is False]
+        drv.terminate_instance(vdu_id)
+
+        self.cleanup_vdu_on_term(account,vdu_id,delete_port_list)
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        """Get information about a virtual deployment unit.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu
+
+        Returns:
+            Object of type RwcalYang.VDUInfoParams
+        """
+        drv = self._get_driver(account)
+
+        ### Get list of ports excluding the one for management network
+        vm = drv.get_instance(vdu_id)
+        port_list = drv.get_network_interface_list(InstanceId = vdu_id)
+        return RwcalAWSPlugin._fill_vdu_info(vm,port_list)
+
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vdu_list(self, account):
+        """Get information about all the virtual deployment units
+
+        Arguments:
+            account     - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VDUInfoParams
+        """
+        vnf_resources = RwcalYang.VNFResources()
+        drv = self._get_driver(account)
+        vms = drv.list_instances()
+        for vm in vms:
+            ### Get list of ports excluding one for management network
+            port_list = [p for p in drv.get_network_interface_list(InstanceId = vm.id)]
+            vdu = RwcalAWSPlugin._fill_vdu_info(vm,
+                                                port_list)
+            vnf_resources.vdu_info_list.append(vdu)
+        return vnf_resources
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/CMakeLists.txt b/rwcal/plugins/vala/rwcal_cloudsim/CMakeLists.txt
new file mode 100644
index 0000000..3250db9
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsim/CMakeLists.txt
@@ -0,0 +1,39 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+# Package identity for the cloudsim CAL plugin
+set(PKG_NAME rwcal-cloudsim)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+# Install the plugin entry point
+rift_install_python_plugin(rwcal_cloudsim rwcal_cloudsim.py)
+
+# Install the supporting python package (python3 only)
+rift_python_install_tree(
+  FILES
+    rift/rwcal/cloudsim/__init__.py
+    rift/rwcal/cloudsim/core.py
+    rift/rwcal/cloudsim/exceptions.py
+    rift/rwcal/cloudsim/image.py
+    rift/rwcal/cloudsim/lvm.py
+    rift/rwcal/cloudsim/lxc.py
+    rift/rwcal/cloudsim/net.py
+    rift/rwcal/cloudsim/shell.py
+  PYTHON3_ONLY
+  COMPONENT ${PKG_LONG_NAME})
+
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/Makefile b/rwcal/plugins/vala/rwcal_cloudsim/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsim/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/__init__.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/__init__.py
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/core.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/core.py
new file mode 100644
index 0000000..86c1952
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/core.py
@@ -0,0 +1,367 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import functools
+
+from . import exceptions
+
+
+def unsupported(f):
+    """Decorator replacing ``f`` with a stub raising RWErrorNotSupported.
+
+    Used by the Cloud base class so that concrete drivers only need to
+    implement the subset of operations they actually support.
+    """
+    @functools.wraps(f)
+    def impl(*args, **kwargs):
+        msg = '{} not supported'.format(f.__name__)
+        raise exceptions.RWErrorNotSupported(msg)
+
+    return impl
+
+
+class Cloud(object):
+    """
+    Cloud defines a base class for cloud driver implementations. Note that
+    not all drivers will support the complete set of functionality presented
+    here.
+    """
+    # Every operation below is wrapped by @unsupported, which replaces the
+    # body with a stub raising exceptions.RWErrorNotSupported. Drivers
+    # override the operations they implement; the `pass` bodies here are
+    # never executed.
+
+    @unsupported
+    def get_management_network(self, account):
+        """
+        Returns the management network associated with the specified account.
+
+        @param account - a cloud account
+
+        @return a management network
+        """
+        pass
+
+    @unsupported
+    def create_tenant(self, account, name):
+        """
+        Create a new tenant.
+
+        @param account - a cloud account
+        @param name    - name to assign to the tenant.
+        """
+        pass
+
+    @unsupported
+    def delete_tenant(self, account, tenant_id):
+        """
+        delete a tenant.
+
+        @param account   - a cloud account
+        @param tenant_id - id of tenant to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_tenant_list(self, account):
+        """
+        List tenants.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_role(self, account, name):
+        """
+        Create a new role.
+
+        @param account - a cloud account
+        @param name    - name to assign to the role.
+        """
+        pass
+
+    @unsupported
+    def delete_role(self, account, role_id):
+        """
+        delete a role.
+
+        @param account - a cloud account
+        @param role_id - id of role to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_role_list(self, account):
+        """
+        List roles.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_image(self, account, image):
+        """
+        Create an image
+
+        @param account - a cloud account
+        @param image   - a description of the image to create
+        """
+        pass
+
+    @unsupported
+    def delete_image(self, account, image_id):
+        """
+        delete a vm image.
+
+        @param account  - a cloud account
+        @param image_id - Instance id of VM image to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_image_list(self, account):
+        """
+        Return a list of the names of all available images.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def get_image(self, account, image_id):
+        """
+        Returns image information.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_vm(self, account, vm):
+        """
+        Create a new virtual machine.
+
+        @param account - a cloud account
+        @param vm      - The info required to create a VM
+        """
+        pass
+
+    @unsupported
+    def start_vm(self, account, vm_id):
+        """
+        start an existing virtual machine.
+
+        @param account - a cloud account
+        @param vm_id   - The id of the VM to start
+        """
+        pass
+
+    @unsupported
+    def stop_vm(self, account, vm_id):
+        """
+        Stop a running virtual machine.
+
+        @param account - a cloud account
+        @param vm_id   - The id of the VM to stop
+        """
+        pass
+
+    @unsupported
+    def delete_vm(self, account, vm_id):
+        """
+        delete a virtual machine.
+
+        @param account - a cloud account
+        @param vm_id   - Instance id of VM to be deleted.
+        """
+        pass
+
+    @unsupported
+    def reboot_vm(self, account, vm_id):
+        """
+        reboot a virtual machine.
+
+        @param account - a cloud account
+        @param vm_id   - Instance id of VM to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_vm_list(self, account):
+        """
+        Return a list of vms.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def get_vm(self, account):
+        """
+        Return vm information.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_flavor(self, account, flavor):
+        """
+        create new flavor.
+
+        @param account - a cloud account
+        @param flavor  - Flavor object
+        """
+        pass
+
+    @unsupported
+    def delete_flavor(self, account, flavor_id):
+        """
+        Delete flavor.
+
+        @param account   - a cloud account
+        @param flavor_id - Flavor id to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_flavor_list(self, account):
+        """
+        Return a list of flavors.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def get_flavor(self, account):
+        """
+        Return flavor information.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def get_network(self, account, network_id):
+        """
+        Return a network
+
+        @param account    - a cloud account
+        @param network_id - unique network identifier
+        """
+        pass
+
+    @unsupported
+    def get_network_list(self, account):
+        """
+        Return a list of networks
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_network(self, account, network):
+        """
+        Create a new network
+
+        @param account - a cloud account
+        @param network - Network object
+        """
+        pass
+
+    @unsupported
+    def delete_network(self, account, network_id):
+        """
+        Delete a network
+
+        @param account    - a cloud account
+        @param network_id - unique network identifier
+        """
+        pass
+
+    @unsupported
+    def get_port(self, account, port_id):
+        """
+        Return a port
+
+        @param account - a cloud account
+        @param port_id - unique port identifier
+        """
+        pass
+
+    @unsupported
+    def get_port_list(self, account):
+        """
+        Return a list of ports
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_port(self, account, port):
+        """
+        Create a new port
+
+        @param account - a cloud account
+        @param port    - port object
+        """
+        pass
+
+    @unsupported
+    def delete_port(self, account, port_id):
+        """
+        Delete a port
+
+        @param account - a cloud account
+        @param port_id - unique port identifier
+        """
+        pass
+
+    @unsupported
+    def add_host(self, account, host):
+        """
+        Add a new host
+
+        @param account - a cloud account
+        @param host    - a host object
+        """
+        pass
+
+    @unsupported
+    def remove_host(self, account, host_id):
+        """
+        Remove a host
+
+        @param account - a cloud account
+        @param host_id - unique host identifier
+        """
+        pass
+
+    @unsupported
+    def get_host(self, account, host_id):
+        """
+        Return a host
+
+        @param account - a cloud account
+        @param host_id - unique host identifier
+        """
+        pass
+
+    @unsupported
+    def get_host_list(self, account):
+        """
+        Return a list of hosts
+
+        @param account - a cloud account
+        """
+        pass
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/exceptions.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/exceptions.py
new file mode 100644
index 0000000..3bb3aa7
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/exceptions.py
@@ -0,0 +1,54 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+#
+# Rift Exceptions:
+#   These exceptions each coorespond with a rift status as they are defined
+# in rwtypes.vala.  Adding them here so that errors from C transistioning
+# back to python can be handled in a pythonic manner rather than having to
+# inspect return values.
+
+class RWErrorFailure(Exception):
+  """Generic operation failure."""
+  pass
+
+class RWErrorDuplicate(Exception):
+  """A duplicate entry was encountered."""
+  pass
+
+class RWErrorNotFound(Exception):
+  """The requested item does not exist."""
+  pass
+
+class RWErrorOutOfBounds(Exception):
+  """A value fell outside the permitted range."""
+  pass
+
+class RWErrorBackpressure(Exception):
+  """The receiver is applying backpressure."""
+  pass
+
+class RWErrorTimeout(Exception):
+  """The operation timed out."""
+  pass
+
+class RWErrorExists(Exception):
+  """The item already exists."""
+  pass
+
+class RWErrorNotEmpty(Exception):
+  """The container is not empty."""
+  pass
+
+class RWErrorNotConnected(Exception):
+  """No connection is established."""
+  pass
+
+class RWErrorNotSupported(Exception):
+  """The requested operation is not supported."""
+  pass
+
+
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/image.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/image.py
new file mode 100644
index 0000000..620dcc4
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/image.py
@@ -0,0 +1,40 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import math
+import re
+
+from . import shell
+
+
+class ImageInfoError(Exception):
+    """Raised when qemu-img output cannot be parsed for image information."""
+    pass
+
+
+def qcow2_virtual_size_mbytes(qcow2_filepath):
+    info_output = shell.command("qemu-img info {}".format(qcow2_filepath))
+    for line in info_output:
+        if line.startswith("virtual size"):
+            match = re.search("\(([0-9]*) bytes\)", line)
+            if match is None:
+                raise ImageInfoError("Could not parse image size")
+
+            num_bytes = int(match.group(1))
+            num_mbytes = num_bytes / 1024 / 1024
+            return math.ceil(num_mbytes)
+
+    raise ImageInfoError("Could not image virtual size field in output")
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lvm.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lvm.py
new file mode 100644
index 0000000..4ae4de9
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lvm.py
@@ -0,0 +1,280 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import logging
+import os
+import re
+
+from . import shell
+
+
+logger = logging.getLogger(__name__)
+
+
class PhysicalVolume(
        collections.namedtuple(
            "PhysicalVolume",
            ["pv", "vg", "fmt", "attr", "psize", "pfree"],
            )
        ):
    """One row of `pvs` output; fields mirror the pvs column headings
    (PV, VG, Fmt, Attr, PSize, PFree), lower-cased.
    """
    pass
+
+
class VolumeGroup(
        collections.namedtuple(
            "VolumeGroup",
            ["vg", "num_pv", "num_lv", "num_sn", "attr", "vsize", "vfree"],
            )
        ):
    """One row of `vgs` output; fields mirror the vgs column headings
    (VG, #PV, #LV, #SN, Attr, VSize, VFree).
    """
    pass
+
+
class LoopbackVolumeGroup(object):
    """Handle for an LVM volume group backed by a loopback device.

    The object is stateless apart from the name: every property
    re-queries the system (via pvs/vgs/losetup helpers below), so it
    stays accurate even when the volume group changes externally.
    """

    def __init__(self, name):
        # Only the name is stored; everything else is looked up on demand.
        self._name = name

    def __repr__(self):
        return repr({
            "name": self.name,
            "filepath": self.filepath,
            "loopback": self.loopback,
            "exists": self.exists,
            "volume_group": self.volume_group,
            })

    @property
    def exists(self):
        """True if a volume group with this name currently exists."""
        return any(v.vg == self.name for v in volume_groups())

    @property
    def name(self):
        """The volume group name."""
        return self._name

    @property
    def filepath(self):
        """Path of the file backing the loop device, or None if absent."""
        return find_backing_file(self.name)

    @property
    def loopback(self):
        """The loop device (e.g. /dev/loop0) for this group, or None."""
        return find_loop_device(self.name)

    @property
    def volume_group(self):
        """The VolumeGroup record for this group, or None if not found."""
        for vgroup in volume_groups():
            if vgroup.vg == self.name:
                return vgroup

    @property
    def physical_volume(self):
        """The PhysicalVolume record for this group, or None if not found."""
        for pvolume in physical_volumes():
            if pvolume.vg == self.name:
                return pvolume

    @property
    def size(self):
        """Size of the backing file in bytes."""
        return os.path.getsize(self.filepath)

    def extend_mbytes(self, num_mbytes):
        """ Extend the size of the Loopback volume group

        Arguments:
            num_mbytes - Number of megabytes to extend by

        The three shell commands below must run in this order: grow the
        backing file, re-read it through the loop driver, then grow the
        physical volume to match.
        """

        # Extend the size of the backing store
        shell.command('truncate -c -s +{}M {}'.format(
            num_mbytes, self.filepath)
            )

        # Notify loopback driver of the resized backing store
        shell.command('losetup -c {}'.format(self.loopback))

        # Expand the physical volume to match new size
        shell.command('pvresize {}'.format(self.physical_volume.pv))
+
+
def find_loop_device(volume):
    """Return the loop device (pv field) of the physical volume whose
    volume group matches *volume*, or None when there is no match.
    """
    matches = (p.pv for p in physical_volumes() if p.vg == volume)
    return next(matches, None)
+
+
def find_backing_file(volume):
    """Return the path of the file backing the volume's loop device.

    Parses `losetup` output of the form:

        /dev/loop0: [64513]:414503 (/lvm/rift.img)

    Arguments:
        volume - the volume group name

    Returns:
        The backing file path, or None when the volume has no loop device.
    """
    loop = find_loop_device(volume)
    if loop is None:
        return None

    output = shell.command("losetup {}".format(loop))[0]
    # Raw string: '\(' in a plain literal is an invalid escape sequence.
    return re.search(r'.*\(([^)]*)\).*', output).group(1)
+
+
def create(volume="rift", filepath="/lvm/rift.img"):
    """
    First, we create a loopback device using a file that we put in the file
    system where running this from. Second, we create an LVM volume group onto
    the loop device that was just created

    Arguments:
        volume   - name of the volume group to create
        filepath - path of the backing file to create

    Returns:
        A LoopbackVolumeGroup handle for the new volume group

    Raises:
        ValueError - if a volume group with this name already exists
    """
    pvolumes = physical_volumes()
    for pvolume in pvolumes:
        if pvolume.vg == volume:
            raise ValueError("VolumeGroup %s already exists" % volume)

    # Delete the existing backing file if it exists
    if os.path.exists(filepath):
        os.remove(filepath)

    # Create the file that will be used as the backing store
    if not os.path.exists(os.path.dirname(filepath)):
        os.makedirs(os.path.dirname(filepath))

    # Create a minimal file to hold any LVM physical volume metadata
    shell.command('truncate -s 50M {}'.format(filepath))

    # Acquire the next available loopback device
    loopback = shell.command('losetup -f --show {}'.format(filepath))[0]

    # Create a physical volume
    shell.command('pvcreate {}'.format(loopback))

    # Create a volume group
    shell.command('vgcreate {} {}'.format(volume, loopback))

    return LoopbackVolumeGroup(volume)
+
+
def get(volume="rift"):
    """Return a LoopbackVolumeGroup handle for *volume* if a physical
    volume belonging to it exists; otherwise return None.
    """
    if any(p.vg == volume for p in physical_volumes()):
        return LoopbackVolumeGroup(volume)
+
+
def destroy(volume="rift"):
    """Tear down a loopback-backed volume group created by create().

    Removes, in order: the volume group, the physical volume, the loop
    device, and finally the backing file. Does nothing when the volume
    group does not exist.
    """
    pvolumes = physical_volumes()
    for pvolume in pvolumes:
        if pvolume.vg == volume:
            break
    else:
        # for/else: no matching physical volume was found -> nothing to do
        return

    # Cache the backing file path (it cannot be looked up once the loop
    # device has been released below)
    filepath = find_backing_file(volume)

    # Remove the volume group
    shell.command('vgremove -f {}'.format(pvolume.vg))

    # Remove the physical volume
    shell.command('pvremove -y {}'.format(pvolume.pv))

    # Release the loopback device
    shell.command('losetup -d {}'.format(pvolume.pv))

    # Remove the backing file
    os.remove(filepath)
+
+
def physical_volumes():
    """Returns a list of physical volumes

    Parses `pvs --rows` output: with --rows each output line holds one
    column of data, comma-separated, with the column heading first.

    Returns:
        A list of PhysicalVolume namedtuples (empty when pvs prints nothing)
    """
    cmd = 'pvs --separator "," --rows'
    lines = [line.strip().split(',') for line in shell.command(cmd)]
    if not lines:
        return []

    # Map pvs column headings to PhysicalVolume field names
    mapping = {
            "PV": "pv",
            "VG": "vg",
            "Fmt": "fmt",
            "Attr": "attr",
            "PSize": "psize",
            "PFree": "pfree",
            }

    # Transpose the data so that the first element of the list is a list of
    # keys.
    transpose = list(map(list, zip(*lines)))

    # Extract keys
    keys = transpose[0]

    # Iterate over the remaining data and create the physical volume objects
    volumes = []
    for values in transpose[1:]:
        volume = {}
        for k, v in zip(keys, values):
            volume[mapping[k]] = v

        volumes.append(PhysicalVolume(**volume))

    return volumes
+
+
def volume_groups():
    """Returns a list of volume groups

    Parses `vgs --rows` output: with --rows each output line holds one
    column of data, comma-separated, with the column heading first.

    Returns:
        A list of VolumeGroup namedtuples (empty when vgs prints nothing)
    """
    cmd = 'vgs --separator "," --rows'
    lines = [line.strip().split(',') for line in shell.command(cmd)]
    if not lines:
        return []

    # Map vgs column headings to VolumeGroup field names
    mapping = {
            "VG": "vg",
            "#PV": "num_pv",
            "#LV": "num_lv",
            "#SN": "num_sn",
            "Attr": "attr",
            "VSize": "vsize",
            "VFree": "vfree",
            }

    # Transpose the data so that the first element of the list is a list of
    # keys.
    transpose = list(map(list, zip(*lines)))

    # Extract keys
    keys = transpose[0]

    # Iterate over the remaining data and create the volume groups
    groups = []
    for values in transpose[1:]:
        group = {}
        for k, v in zip(keys, values):
            group[mapping[k]] = v

        groups.append(VolumeGroup(**group))

    return groups
+
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lxc.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lxc.py
new file mode 100644
index 0000000..9cbde9d
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lxc.py
@@ -0,0 +1,534 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import contextlib
+import functools
+import logging
+import os
+import re
+import shutil
+import uuid
+
+from . import shell
+from . import image
+from . import lvm
+
+
+logger = logging.getLogger(__name__)
+
+
class ValidationError(Exception):
    """Raised when an operation targets a container that does not exist."""
+
+
@contextlib.contextmanager
def mount(mountpoint, path):
    """Mounts a device and unmounts it upon exit.

    Arguments:
        mountpoint - the device/source to mount
        path       - the directory to mount onto

    The unmount runs in a finally block so the mount is released even
    when the managed block raises (the original leaked the mount on
    exceptions).
    """
    shell.command('mount {} {}'.format(mountpoint, path))
    logger.debug('mount {} {}'.format(mountpoint, path))
    try:
        yield
    finally:
        shell.command('umount {}'.format(path))
        logger.debug('umount {}'.format(path))
+
+
def create_container(name, template_path, volume, rootfs_qcow2file):
    """Create a new container

    Arguments:
        name             - the name of the new container
        template_path    - the template defines the type of container to create
        volume           - the volume group that the container will be in
        rootfs_qcow2file - a path to a qcow2 image that contains the rootfs

    Returns:
        A Container object for the new container

    """
    # The first four placeholders are filled in below via cmd.format();
    # the qcow2 path and log redirection are appended to the template now.
    cmd = 'lxc-create -t {} -n {} -B lvm --fssize {}M --vgname {}'
    cmd += " -- --rootfs-qcow2file {}".format(rootfs_qcow2file)
    cmd += " 2>&1 | tee -a /var/log/rift_lxc.log"
    virtual_size_mbytes = image.qcow2_virtual_size_mbytes(rootfs_qcow2file)

    # Grow the loopback volume group so the new rootfs LV will fit
    loop_volume = lvm.get(volume)
    loop_volume.extend_mbytes(virtual_size_mbytes)

    shell.command(cmd.format(
        template_path, name, virtual_size_mbytes, volume
        ))

    return Container(name, volume=volume, size_mbytes=virtual_size_mbytes)
+
+
def create_snapshot(base, name, volume, size_mbytes):
    """Create a clone of an existing container

    Arguments:
        base        - the name of the existing container
        name        - the name to give to the clone
        volume      - the volume group that the container will be in
        size_mbytes - number of megabytes to extend the volume group by

    Returns:
        A Container object for the new snapshot

    """
    cmd = '/bin/bash lxc-clone -o {} -n {} --vgname {} --snapshot'

    loop_volume = lvm.get(volume)
    loop_volume.extend_mbytes(size_mbytes)

    try:
        shell.command(cmd.format(base, name, volume))

    except shell.ProcessError as e:
        # Skip the error that occurs here. It is corrected during configuration
        # and results from a bug in the lxc script.

        # In lxc-clone, when cloning multiple times from the same container
        # it is possible that the lvrename operation fails to rename the
        # file in /dev/rift (but the logical volume is renamed).
        # This logic below resolves this particular scenario.
        if "lxc-clone: failed to mount new rootfs" in str(e):
            os.rmdir("/dev/rift/{name}".format(name=name))
            shutil.move("/dev/rift/{name}_snapshot".format(name=name),
                        "/dev/rift/{name}".format(name=name)
                        )

        elif "mkdir: cannot create directory" not in str(e):
            # Any other lxc-clone failure is unexpected; re-raise it.
            raise

    return Container(name, volume=volume, size_mbytes=size_mbytes)
+
+
def purge_cache():
    """Delete all cached lxc templates."""
    cmd = 'rm -rf /var/cache/lxc/*'
    shell.command(cmd)
+
+
def force_clean():
    """Force cleanup of the lxc directory

    Tries a straight recursive removal first; when that fails with a
    ProcessError, unmounts each container's rootfs and retries.
    NOTE(review): the retry removal runs inside the loop after every
    umount, and the umount itself can raise if the path is not mounted
    — confirm this best-effort ordering is intentional.
    """

    lxc_dir = "/var/lib/lxc/"
    try:
        shell.command('rm -rf {}*'.format(lxc_dir))
    except shell.ProcessError:
        for directory in os.listdir(lxc_dir):
            path = os.path.join(lxc_dir, directory, "rootfs")
            # Sometimes we might not be able to destroy container, if the
            # device is still mounted so unmount it first.
            shell.command("umount {}".format(path))
            shell.command('rm -rf {}*'.format(lxc_dir))
+
+
def containers():
    """Return the names of all containers reported by lxc-ls."""
    names = []
    for line in shell.command('lxc-ls'):
        if line:
            names.append(line)
    return names
+
+
def destroy(name):
    """Destroys a container

    Arguments:
        name - the name of the container to destroy

    """
    cmd = 'lxc-destroy -n {}'.format(name)
    shell.command(cmd)
+
+
def start(name):
    """Start a container (daemonized, with DEBUG lxc logging).

    Arguments:
        name - the name of the container to start

    """
    cmd = 'lxc-start -d -n {} -l DEBUG'.format(name)
    shell.command(cmd)
+
+
def stop(name):
    """Stop a running container.

    Arguments
        name - the name of the container to stop

    """
    cmd = 'lxc-stop -n {}'.format(name)
    shell.command(cmd)
+
+
def state(name):
    """Returns the current state of a container

    Arguments:
        name - the name of the container whose state is returned

    Returns:
        A string describing the state of the container

    """
    # lxc-info -s prints a single "State: <state>" line; unpack it.
    _, container_state = shell.command('lxc-info -s -n {}'.format(name))[0].split()
    return container_state
+
+
def ls():
    """Print the output from 'lxc-ls --fancy'."""
    output = shell.command('lxc-ls --fancy')
    print('\n'.join(output))
+
+
def ls_info():
    """Return a mapping of active container names to their IPv4 addresses.

    Returns:
        dict of container name -> list of IPv4 address strings
    """
    lxc_info = shell.command('lxc-ls --fancy --active --fancy-format=name,ipv4')

    lxc_to_ip = {}

    # Raw string (the original non-raw literal relied on invalid '\.'
    # escapes). Lines with a dotted-quad carry "name ip[, ip...]".
    line_regex = re.compile(r"(.*?)\.(.*?)\.(.*?)\.(.*?)\.")
    for lxc in lxc_info:
        if line_regex.match(lxc):
            # Split once instead of twice as in the original.
            fields = lxc.split()
            lxc_name = fields[0]

            ips = fields[1:]
            lxc_to_ip[lxc_name] = [ip.replace(",", "") for ip in ips]

    return lxc_to_ip
+
+
def validate(f):
    """
    Decorator for Container methods: verifies that the container the
    method targets actually exists, raising ValidationError otherwise.
    """
    @functools.wraps(f)
    def impl(self, *args, **kwargs):
        if self.name not in containers():
            raise ValidationError(
                    'container ({}) does not exist'.format(self.name))

        return f(self, *args, **kwargs)

    return impl
+
+
class Container(object):
    """
    This class provides an interface to an existing container on the system.
    Operations shell out to the lxc-* tools; methods decorated with
    @validate raise ValidationError when the container does not exist.
    """

    def __init__(self, name, size_mbytes=4096, volume="rift", hostname=None):
        # name        - the lxc container name
        # size_mbytes - virtual size of the container's rootfs
        # volume      - LVM volume group holding the rootfs
        # hostname    - hostname used inside the container (defaults to name)
        self._name = name
        self._size_mbytes = size_mbytes
        self._volume = volume
        self.hostname = name if hostname is None else hostname

    @property
    def name(self):
        """The name of the container"""
        return self._name

    @property
    def size(self):
        """The virtual size of the container"""
        return self._size_mbytes

    @property
    def volume(self):
        """The volume that the container is a part of"""
        return self._volume

    @property
    def loopback_volume(self):
        """ Instance of lvm.LoopbackVolumeGroup """
        return lvm.get(self.volume)

    @property
    @validate
    def state(self):
        """The current state of the container"""
        return state(self.name)

    @validate
    def start(self):
        """Starts the container"""
        start(self.name)

    @validate
    def stop(self):
        """Stops the container"""
        stop(self.name)

    @validate
    def destroy(self):
        """Destroys the container"""
        destroy(self.name)

    @validate
    def info(self):
        """Returns info about the container"""
        return shell.command('lxc-info -n {}'.format(self.name))

    @validate
    def snapshot(self, name):
        """Create a snapshot of this container

        Arguments:
            name - the name of the snapshot

        Returns:
            A Container representing the new snapshot

        """
        return create_snapshot(self.name, name, self.volume, self.size)

    @validate
    def configure(self, config, volume='rift', userdata=None):
        """Configures the container

        Arguments:
            config   - a container configuration object
            volume   - the volume group that the container will belong to
            userdata - a string containing userdata that will be passed to
                       cloud-init for execution

        """
        # Create the LXC config file
        with open("/var/lib/lxc/{}/config".format(self.name), "w") as fp:
            fp.write(str(config))
            logger.debug('created /var/lib/lxc/{}/config'.format(self.name))

        # Mount the rootfs of the container and configure the hosts and
        # hostname files of the container.
        rootfs = '/var/lib/lxc/{}/rootfs'.format(self.name)
        os.makedirs(rootfs, exist_ok=True)

        with mount('/dev/rift/{}'.format(self.name), rootfs):

            # Create /etc/hostname
            with open(os.path.join(rootfs, 'etc/hostname'), 'w') as fp:
                fp.write(self.hostname + '\n')
                logger.debug('created /etc/hostname')

            # Create /etc/hosts with loopback entries for the hostname
            with open(os.path.join(rootfs, 'etc/hosts'), 'w') as fp:
                fp.write("127.0.0.1 localhost {}\n".format(self.hostname))
                fp.write("::1 localhost {}\n".format(self.hostname))
                logger.debug('created /etc/hosts')

            # Disable autofs (conflicts with lxc workspace mount bind)
            autofs_service_file = os.path.join(
                    rootfs,
                    "etc/systemd/system/multi-user.target.wants/autofs.service",
                    )
            if os.path.exists(autofs_service_file):
                os.remove(autofs_service_file)

            # Setup the mount points
            for mount_point in config.mount_points:
                mount_point_path = os.path.join(rootfs, mount_point.remote)
                os.makedirs(mount_point_path, exist_ok=True)

            # Copy the cloud-init script into the nocloud seed directory
            if userdata is not None:
                try:
                    userdata_dst = os.path.join(rootfs, 'var/lib/cloud/seed/nocloud/user-data')
                    os.makedirs(os.path.dirname(userdata_dst))
                except FileExistsError:
                    pass

                # NOTE(review): write failures below are logged and
                # swallowed — confirm best-effort is intended here.
                try:
                    with open(userdata_dst, 'w') as fp:
                        fp.write(userdata)
                except Exception as e:
                    logger.exception(e)

                # Cloud init requires a meta-data file in the seed location
                metadata = "instance_id: {}\n".format(str(uuid.uuid4()))
                metadata += "local-hostname: {}\n".format(self.hostname)

                try:
                    metadata_dst = os.path.join(rootfs, 'var/lib/cloud/seed/nocloud/meta-data')
                    with open(metadata_dst, 'w') as fp:
                        fp.write(metadata)

                except Exception as e:
                    logger.exception(e)
+
+
class ContainerConfig(object):
    """
    This class represents the config file that is used to define the interfaces
    on a container. str() of an instance yields the full LXC config text.
    """

    def __init__(self, name, volume='rift'):
        # name         - container name (used for rootfs/utsname/fstab paths)
        # volume       - LVM volume group holding the rootfs
        # networks     - list of NetworkConfig entries
        # mount_points - list of MountConfig entries
        # cgroups      - device cgroup policy block
        self.name = name
        self.volume = volume
        self.networks = []
        self.mount_points = []
        self.cgroups = ControlGroupsConfig()

    def add_network_config(self, network_config):
        """Add a network config object

        Arguments:
            network_config - the network config object to add

        """
        self.networks.append(network_config)

    def add_mount_point_config(self, mount_point_config):
        """Add a mount point to the configuration

        Arguments,
            mount_point_config - a MountPointConfig object

        """
        self.mount_points.append(mount_point_config)

    def __repr__(self):
        # NOTE(review): "lxc.kmsg = 0" appears twice in this template —
        # confirm the duplicate is intentional.
        fields = """
            lxc.rootfs = /dev/{volume}/{name}
            lxc.utsname = {utsname}
            lxc.tty = 4
            lxc.pts = 1024
            lxc.mount = /var/lib/lxc/{name}/fstab
            lxc.cap.drop = sys_module mac_admin mac_override sys_time
            lxc.kmsg = 0
            lxc.autodev = 1
            lxc.kmsg = 0
            """.format(volume=self.volume, name=self.name, utsname=self.name)

        # Strip the Python-source indentation from each section, then
        # concatenate the sections in config-file order.
        fields = '\n'.join(n.strip() for n in fields.splitlines())
        cgroups = '\n'.join(n.strip() for n in str(self.cgroups).splitlines())
        networks = '\n'.join(str(n) for n in self.networks)
        mount_points = '\n'.join(str(n) for n in self.mount_points)

        return '\n'.join((fields, cgroups, networks, mount_points))
+
+
class ControlGroupsConfig(object):
    """
    This class represents the control group configuration for a container.
    repr() yields a fixed device-cgroup policy block: deny everything,
    then allow the devices listed below.
    """

    def __repr__(self):
        # Leading whitespace on each line is stripped by the caller
        # (ContainerConfig.__repr__) before the text is written out.
        return """
            #cgroups
            lxc.cgroup.devices.deny = a

            # /dev/null and zero
            lxc.cgroup.devices.allow = c 1:3 rwm
            lxc.cgroup.devices.allow = c 1:5 rwm

            # consoles
            lxc.cgroup.devices.allow = c 5:1 rwm
            lxc.cgroup.devices.allow = c 5:0 rwm
            lxc.cgroup.devices.allow = c 4:0 rwm
            lxc.cgroup.devices.allow = c 4:1 rwm

            # /dev/{,u}random
            lxc.cgroup.devices.allow = c 1:9 rwm
            lxc.cgroup.devices.allow = c 1:8 rwm
            lxc.cgroup.devices.allow = c 136:* rwm
            lxc.cgroup.devices.allow = c 5:2 rwm

            # rtc
            lxc.cgroup.devices.allow = c 254:0 rm
            """
+
+
class NetworkConfig(collections.namedtuple(
    "NetworkConfig", [
        "type",
        "link",
        "flags",
        "name",
        "veth_pair",
        "ipv4",
        "ipv4_gateway",
        ]
    )):
    """
    One lxc.network.* interface block of a container configuration.
    Optional fields (veth_pair, ipv4, ipv4_gateway) are omitted from the
    rendered block when left as None.
    """

    def __new__(cls,
            type,
            link,
            name,
            flags='up',
            veth_pair=None,
            ipv4=None,
            ipv4_gateway=None,
            ):
        # Map the call signature onto the namedtuple field order.
        return super(NetworkConfig, cls).__new__(
                cls, type, link, flags, name,
                veth_pair, ipv4, ipv4_gateway,
                )

    def __repr__(self):
        lines = ["# Start {} configuration".format(self.name)]
        lines.append("lxc.network.type = {}".format(self.type))
        lines.append("lxc.network.link = {}".format(self.link))
        lines.append("lxc.network.flags = {}".format(self.flags))
        lines.append("lxc.network.name = {}".format(self.name))

        # Optional settings are only emitted when provided.
        if self.veth_pair is not None:
            lines.append("lxc.network.veth.pair = {}".format(self.veth_pair))

        if self.ipv4 is not None:
            lines.append("lxc.network.ipv4 = {}/24".format(self.ipv4))

        if self.ipv4_gateway is not None:
            lines.append("lxc.network.ipv4.gateway = {}".format(self.ipv4_gateway))

        lines.append("# End {} configuration\n".format(self.name))

        return '\n'.join(lines)
+
+
class MountConfig(collections.namedtuple(
    "ContainerMountConfig", [
        "local",
        "remote",
        "read_only",
        ]
    )):
    """
    One lxc.mount.entry bind-mount of a container configuration.
    The mount is read-only unless read_only=False is passed.
    """

    def __new__(cls, local, remote, read_only=True):
        return super(MountConfig, cls).__new__(cls, local, remote, read_only)

    def __repr__(self):
        mode = "ro," if self.read_only else ""
        return "lxc.mount.entry = {} {} none {}bind 0 0\n".format(
                self.local,
                self.remote,
                mode,
                )
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/net.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/net.py
new file mode 100644
index 0000000..517356b
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/net.py
@@ -0,0 +1,147 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+
+import netifaces
+
+from . import shell
+
+
class VirshError(Exception):
    """Raised when a virsh network query or operation fails."""
+
+
def create(network, ip_interface=None):
    """ Create, assign ip and bring up a bridge interface

    Arguments:
        network - The network name
        ip_interface - An ipaddress.IPv4Interface instance
    """
    bridge_add(network)
    if ip_interface is not None:
        addr = str(ip_interface)
        broadcast = str(ip_interface.network.broadcast_address)
        bridge_addr(network, addr, broadcast)
    bridge_up(network)
+
+
def delete(network):
    """Bring the named bridge down, then remove it."""
    bridge_down(network)
    bridge_remove(network)
+
+
def bridge_add(network):
    """Create a bridge interface with the given name via brctl."""
    cmd = "/usr/sbin/brctl addbr {network}".format(network=network)
    shell.command(cmd)
+
+
def bridge_remove(network):
    """Delete the named bridge interface via brctl."""
    cmd = "/usr/sbin/brctl delbr {network}".format(network=network)
    shell.command(cmd)
+
+
def bridge_addr(network, addr, broadcast):
    """Assign an address and broadcast address to the named bridge."""
    cmd = "ip addr add {addr} broadcast {broadcast} dev {network}".format(
            addr=addr, broadcast=broadcast, network=network)
    shell.command(cmd)
+
+
def bridge_exists(network):
    """True when an interface with the bridge's name is present."""
    interfaces = netifaces.interfaces()
    return network in interfaces
+
+
def bridge_down(network):
    """Set the named bridge's link state to down."""
    cmd = 'ip link set {network} down'.format(network=network)
    shell.command(cmd)
+
+
def bridge_up(network):
    """Set the named bridge's link state to up."""
    cmd = 'ip link set {network} up'.format(network=network)
    shell.command(cmd)
+
+
def bridge_addresses(network):
    """Return a BridgeAddresses(addr, netmask, broadcast) namedtuple for
    the bridge's first IPv4 address.

    Raises:
        ValueError - when the interface has no IPv4 address entry
    """
    addresses = netifaces.ifaddresses(network)
    if netifaces.AF_INET not in addresses:
        raise ValueError('unable to find subnet for {}'.format(network))

    address = addresses[netifaces.AF_INET][0]

    cls = collections.namedtuple('BridgeAddresses', 'addr netmask broadcast')
    return cls(**address)
+
+
# Parsed row of `virsh net-list` output.
VirshNetwork = collections.namedtuple(
    'VirshNetwork',
    ['name', 'state', 'autostart', 'persistant'],
    )
+
+
def virsh_list_networks():
    """Return a list of VirshNetwork records from `virsh net-list --all`.

    Returns:
        A list of VirshNetwork namedtuples (possibly empty).

    Raises:
        VirshError - when the output lacks the expected two header lines.
    """
    lines = shell.command('virsh net-list --all')
    if len(lines) < 2:
        # Use the module's own exception type (was a bare Exception) and
        # state the actual expectation: at least the two header lines.
        raise VirshError("Expected at least two header lines from virsh net-list output")

    # Skip the two header lines (column names and separator).
    network_lines = lines[2:]
    virsh_networks = []
    for line in network_lines:
        if not line.strip():
            continue

        (name, state, autostart, persistant) = line.split()
        virsh_networks.append(
                VirshNetwork(name, state, autostart, persistant)
                )

    return virsh_networks
+
+
def virsh_list_network_names():
    """Return just the names of the known virsh networks."""
    return [network.name for network in virsh_list_networks()]
+
+
def virsh_is_active(network_name):
    """True when the named virsh network is in the "active" state.

    Raises:
        VirshError - when no network with that name is known.
    """
    match = next(
            (n for n in virsh_list_networks() if n.name == network_name),
            None,
            )
    if match is None:
        raise VirshError("Did not find virsh network %s" % network_name)

    return match.state == "active"
+
+
def virsh_define_default():
    """Define the libvirt 'default' network from its stock XML file."""
    cmd = 'virsh net-define /usr/share/libvirt/networks/default.xml'
    shell.command(cmd)
+
+
def virsh_start(network_name):
    """Start the named virsh network."""
    cmd = 'virsh net-start %s' % network_name
    shell.command(cmd)
+
+
def virsh_initialize_default():
    """(Re)start the libvirt 'default' network from a clean state.

    Defines the network when missing, tears it down when currently
    active (including the virbr0 bridge), then starts it.
    """
    if "default" not in virsh_list_network_names():
        virsh_define_default()

    # If already running, take the bridge down first, then destroy the
    # network, so the start below begins from scratch.
    if virsh_is_active("default"):
        if bridge_exists("virbr0"):
            bridge_down("virbr0")

        virsh_destroy("default")

    virsh_start("default")
+
+
def virsh_destroy(network_name):
    """Stop (destroy) the named virsh network."""
    cmd = 'virsh net-destroy %s' % network_name
    shell.command(cmd)
+
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/shell.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/shell.py
new file mode 100644
index 0000000..41a96ae
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/shell.py
@@ -0,0 +1,46 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+import subprocess
+
+
logger = logging.getLogger(__name__)


class ProcessError(Exception):
    """Raised when a shell command exits with a non-zero status; the
    exception message is the command's stderr output."""
    pass


def command(cmd):
    """Run *cmd* through the shell and return its stdout as a list of lines.

    Arguments:
        cmd - the shell command line to execute

    Returns:
        The command's decoded stdout, split into lines.

    Raises:
        ProcessError - when the command exits non-zero (message is stderr).
    """
    # Lazy %-formatting avoids building the message when DEBUG is off.
    logger.debug('executing: %s', cmd)

    # subprocess.run() replaces the original Popen/communicate/wait
    # sequence (the extra wait() after communicate() was redundant).
    result = subprocess.run(
            cmd,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            )

    if result.returncode != 0:
        raise ProcessError(result.stderr.decode())

    return result.stdout.decode().splitlines()
+
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rwcal_cloudsim.py b/rwcal/plugins/vala/rwcal_cloudsim/rwcal_cloudsim.py
new file mode 100644
index 0000000..6da8a2e
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsim/rwcal_cloudsim.py
@@ -0,0 +1,1430 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import hashlib
+import itertools
+import logging
+import os
+import time
+import uuid
+
+import ipaddress
+
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang,
+    )
+
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+
+import rift.rwcal.cloudsim.lxc as lxc
+import rift.rwcal.cloudsim.lvm as lvm
+import rift.rwcal.cloudsim.net as net
+import rift.rwcal.cloudsim.exceptions as exceptions
+
+logger = logging.getLogger('rwcal.cloudsim')
+
+# Map python exception types to RwStatus codes for the status decorators.
+rwstatus_exception_map = { IndexError: RwTypes.RwStatus.NOTFOUND,
+                           KeyError: RwTypes.RwStatus.NOTFOUND,
+                           NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}
+
+# NOTE(review): this 'rwstatus' binding is clobbered by the second
+# rw_status.rwstatus_from_exc_map(...) call further below; only
+# 'rwcalstatus' from this pair remains in effect.  Consider removing
+# the duplicate definition.
+rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
+rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
+
+
+# Raised when a cloud account is not recognized by this plugin.
+class UnknownAccountError(Exception):
+    pass
+
+
+# Raised when a file referenced by a request does not exist.
+class MissingFileError(Exception):
+    pass
+
+
+# Raised when an image location is invalid.
+class ImageLocationError(Exception):
+    pass
+
+
+# Raised when a create-network request fails validation.
+class CreateNetworkError(Exception):
+    pass
+
+
+# Effective 'rwstatus' decorator: maps the listed exceptions to NOTFOUND.
+# NOTE(review): unlike rwstatus_exception_map above, this map drops the
+# NotImplementedError -> NOT_IMPLEMENTED mapping -- confirm that is intended.
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class Resources(object):
+    """Simple holder for CAL resource collections (currently images only)."""
+    def __init__(self):
+        # image_id -> image object mapping
+        self.images = dict()
+
+
+def rwcal_copy_object(obj):
+    """Return a copy of a GI/protobuf object via its copy_from() method."""
+    dup = obj.__class__()
+    dup.copy_from(obj)
+    return dup
+
+
+# Name and gateway address of the management bridge -- the defaults that
+# libvirt's "default" NAT network creates (virbr0 / 192.168.122.1).
+MGMT_NETWORK_NAME = "virbr0"
+MGMT_NETWORK_INTERFACE_IP = ipaddress.IPv4Interface("192.168.122.1/24")
+
+
+# Raised by NetworkIPPool when no addresses remain to allocate.
+class IPPoolError(Exception):
+    pass
+
+
+class NetworkIPPool(object):
+    def __init__(self, subnet):
+        self._network = ipaddress.IPv4Network(subnet)
+        self._ip_gen = self._network.hosts()
+        self._allocated_ips = []
+        self._unallocated_ips = []
+
+    def allocate_ip(self):
+        try:
+            ip = str(next(self._ip_gen))
+        except StopIteration:
+            try:
+                ip = self._unallocated_ips.pop()
+            except IndexError:
+                raise IPPoolError("All ip addresses exhausted")
+
+        self._allocated_ips.append(ip)
+        return ip
+
+    def deallocate_ip(self, ip):
+        if ip not in self._allocated_ips:
+            raise ValueError("Did not find IP %s in allocate ip pool")
+
+        self._allocated_ips.remove(ip)
+        self._unallocated_ips.append(ip)
+
+
+class CalManager(object):
+    def __init__(self):
+        self._vms = {}
+        self._ports = {}
+        self._images = {}
+        self._networks = {}
+        self.flavors = {}
+
+        self._port_to_vm = {}
+        self._vm_to_image = {}
+        self._port_to_network = {}
+        self._network_to_ip_pool = {}
+
+        self._vm_to_ports = collections.defaultdict(list)
+        self._image_to_vms = collections.defaultdict(list)
+        self._network_to_ports = collections.defaultdict(list)
+
+        self._vm_id_gen = itertools.count(1)
+        self._network_id_gen = itertools.count(1)
+        self._image_id_gen = itertools.count(1)
+
+    def add_image(self, image):
+        image_id = str(next(self._image_id_gen))
+        self._images[image_id] = image
+
+        return image_id
+
+    def remove_image(self, image_id):
+        for vm_id in self.get_image_vms(image_id):
+            self.remove_vm(vm_id)
+
+        del self._images[image_id]
+        del self._image_to_vms[image_id]
+
+    def get_image(self, image_id):
+        if image_id not in self._images:
+            msg = "Unable to find image {}"
+            raise exceptions.RWErrorNotFound(msg.format(image_id))
+
+        return self._images[image_id]
+
+    def get_image_list(self):
+        return list(self._images.values())
+
+    def get_image_vms(self, image_id):
+        if image_id not in self._images:
+            msg = "Unable to find image {}"
+            raise exceptions.RWErrorNotFound(msg.format(image_id))
+
+        return self._image_to_vms[image_id]
+
+    def add_port(self, network_id, vm_id, port):
+        if network_id not in self._networks:
+            msg = "Unable to find network {}"
+            raise exceptions.RWErrorNotFound(msg.format(network_id))
+
+        if vm_id not in self._vms:
+            msg = "Unable to find vm {}"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        port_id = str(uuid.uuid4())
+        self._ports[port_id] = port
+
+        self._vm_to_ports[vm_id].append(port_id)
+        self._network_to_ports[network_id].append(port_id)
+
+        self._port_to_vm[port_id] = vm_id
+        self._port_to_network[port_id] = network_id
+
+        return port_id
+
+    def remove_port(self, port_id):
+        if port_id not in self._ports:
+            msg = "Unable to find port {}"
+            raise exceptions.RWErrorNotFound(msg.format(port_id))
+
+        network_id = self._port_to_network[port_id]
+        vm_id = self._port_to_vm[port_id]
+
+        self._vm_to_ports[vm_id].remove(port_id)
+        self._network_to_ports[network_id].remove(port_id)
+
+        del self._ports[port_id]
+        del self._port_to_vm[port_id]
+        del self._port_to_network[port_id]
+
+    def get_port(self, port_id):
+        return self._ports[port_id]
+
+    def get_port_list(self):
+        return list(self._ports.values())
+
+    def add_network(self, network):
+        network_id = str(next(self._network_id_gen))
+        self._networks[network_id] = network
+
+        return network_id
+
+    def remove_network(self, network_id):
+        for port_id in self.get_network_ports(network_id):
+            self.remove_port(port_id)
+
+        del self._networks[network_id]
+
+    def get_network(self, network_id):
+        return self._networks[network_id]
+
+    def add_network_ip_pool(self, network_id, ip_pool):
+        self._network_to_ip_pool[network_id] = ip_pool
+
+    def get_network_ip_pool(self, network_id):
+        return self._network_to_ip_pool[network_id]
+
+    def remove_network_ip_pool(self, network_id):
+        del self._network_to_ip_pool[network_id]
+
+    def get_network_list(self):
+        return list(self._networks.values())
+
+    def get_network_ports(self, network_id):
+        return self._network_to_ports[network_id]
+
+    def add_vm(self, image_id, vm):
+        if image_id not in self._images:
+            msg = "Unable to find image {}"
+            raise exceptions.RWErrorNotFound(msg.format(image_id))
+
+        vm_id = str(next(self._vm_id_gen))
+        self._vms[vm_id] = vm
+
+        self._vm_to_image[vm_id] = image_id
+        self._image_to_vms[image_id].append(vm_id)
+
+        return vm_id
+
+    def remove_vm(self, vm_id):
+        for port_id in self.get_vm_ports(vm_id):
+            self.remove_port(port_id)
+
+        image_id = self._vm_to_image[vm_id]
+
+        self._image_to_vms[image_id].remove(vm_id)
+
+        del self._vms[vm_id]
+        del self._vm_to_image[vm_id]
+
+    def get_vm(self, vm_id):
+        return self._vms[vm_id]
+
+    def get_vm_list(self):
+        return list(self._vms.values())
+
+    def get_vm_ports(self, vm_id):
+        return self._vm_to_ports[vm_id]
+
+
+class LxcManager(object):
+    def __init__(self):
+        self._containers = {}
+        self._ports = {}
+        self._bridges = {}
+
+        self._port_to_container = {}
+        self._port_to_bridge = {}
+
+        self._container_to_ports = collections.defaultdict(list)
+        self._bridge_to_ports = collections.defaultdict(list)
+
+        # Create the management network
+        self.mgmt_network = RwcalYang.NetworkInfoItem()
+        self.mgmt_network.network_name = MGMT_NETWORK_NAME
+
+        network = MGMT_NETWORK_INTERFACE_IP.network
+        self.mgmt_network.subnet = str(network)
+
+        # Create/Start the default virtd network for NAT-based
+        # connectivity inside containers (http://wiki.libvirt.org/page/Networking)
+        if "default" not in net.virsh_list_network_names():
+            logger.debug("default virtd network not found.  Creating.")
+            net.virsh_define_default()
+
+            # The default virsh profile create a virbr0 interface
+            # with a 192.168.122.1 ip address.  Also sets up iptables
+            # for NAT access.
+            net.virsh_start("default")
+
+        # Create the IP pool
+        mgmt_network_hosts = network.hosts()
+
+        # Remove the management interface ip from the pool
+        self._mgmt_ip_pool = list(mgmt_network_hosts)
+        self._mgmt_ip_pool.remove(MGMT_NETWORK_INTERFACE_IP.ip)
+
+    def acquire_mgmt_ip(self):
+        """Returns an IP address from the available pool"""
+        # TODO these ips will need to be recycled at some point
+        return str(self._mgmt_ip_pool.pop())
+
+    def add_port(self, bridge_id, container_id, port):
+        if bridge_id not in self._bridges:
+            msg = "Unable to find bridge {}"
+            raise exceptions.RWErrorNotFound(msg.format(bridge_id))
+
+        if container_id not in self._containers:
+            msg = "Unable to find container {}"
+            raise exceptions.RWErrorNotFound(msg.format(container_id))
+
+        port_id = str(uuid.uuid4())
+        self._ports[port_id] = port
+
+        self._container_to_ports[container_id].append(port_id)
+        self._bridge_to_ports[bridge_id].append(port_id)
+
+        self._port_to_container[port_id] = container_id
+        self._port_to_bridge[port_id] = bridge_id
+
+        return port_id
+
+    def remove_port(self, port_id):
+        if port_id not in self._ports:
+            msg = "Unable to find port {}"
+            raise exceptions.RWErrorNotFound(msg.format(port_id))
+
+        bridge_id = self._port_to_bridge[port_id]
+        container_id = self._port_to_container[port_id]
+
+        self._container_to_ports[container_id].remove(port_id)
+        self._bridge_to_ports[bridge_id].remove(port_id)
+
+        del self._ports[port_id]
+        del self._port_to_bridge[port_id]
+        del self._port_to_container[port_id]
+
+    def get_port(self, port_id):
+        return self._ports[port_id]
+
+    def add_bridge(self, bridge):
+        bridge_id = str(uuid.uuid4())
+        self._bridges[bridge_id] = bridge
+
+        return bridge_id
+
+    def remove_bridge(self, bridge_id):
+        for port_id in self._bridge_to_ports[bridge_id]:
+            self.remove_port(port_id)
+
+        del self._bridges[bridge_id]
+
+    def get_bridge(self, bridge_id):
+        return self._bridges[bridge_id]
+
+    def get_bridge_ports(self, bridge_id):
+        port_ids = self._bridge_to_ports[bridge_id]
+        return [self.get_port(port_id) for port_id in port_ids]
+
+    def add_container(self, container):
+        container_id = str(uuid.uuid4())
+        self._containers[container_id] = container
+
+        return container_id
+
+    def remove_container(self, container_id):
+        for port_id in self.get_container_ports(container_id):
+            self.remove_port(port_id)
+
+        del self._containers[container_id]
+
+    def get_container(self, container_id):
+        return self._containers[container_id]
+
+    def get_container_ports(self, container_id):
+        return self._container_to_ports[container_id]
+
+
+
+class Datastore(object):
+    """
+    Process-wide shared state for CloudSimPlugin instances: the LXC and CAL
+    managers plus the mapping between CAL object ids and their LXC
+    counterparts.
+    """
+    def __init__(self):
+        self.lxc_manager = LxcManager()
+        self.cal_manager = CalManager()
+        # CAL id -> LXC id, keyed by object category.
+        self.cal_to_lxc = {'image': {}, 'port': {}, 'network': {}, 'vm': {}}
+        # Monotonic counter used to generate unique snapshot names.
+        self.last_index = 0
+
+
+class CloudSimPlugin(GObject.Object, RwCal.Cloud):
+    """CAL plugin backing cloud operations with local LXC containers."""
+
+    # HACK this is a work-around for sharing/persisting container information.
+    # This will only work for instances of CloudSimPlugin that are within the
+    # same process. Thus, it works in collapsed mode, but will not work in
+    # expanded mode. At the point where it is necessary to persist this
+    # information in expanded mode, we will need to find a better solution.
+    datastore = None
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        # Lazily create the process-wide shared datastore (see HACK above).
+        if CloudSimPlugin.datastore is None:
+            CloudSimPlugin.datastore = Datastore()
+
+    @property
+    def lxc(self):
+        """The shared LxcManager instance."""
+        return CloudSimPlugin.datastore.lxc_manager
+
+    @property
+    def cal(self):
+        """The shared CalManager instance."""
+        return CloudSimPlugin.datastore.cal_manager
+
+    @property
+    def volume_group(self):
+        """The LVM volume group named "rift" used for container storage."""
+        return lvm.get("rift")
+
+    @property
+    def cal_to_lxc(self):
+        """Mapping of CAL ids to LXC ids, keyed by object category."""
+        return CloudSimPlugin.datastore.cal_to_lxc
+
+    def next_snapshot_name(self):
+        """Generates a new snapshot name for a container (rws1, rws2, ...)"""
+        CloudSimPlugin.datastore.last_index += 1
+        return 'rws{}'.format(CloudSimPlugin.datastore.last_index)
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        """Attach an rwlogger handler (exactly once) to this plugin's logger."""
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="cloudsim",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If creds are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        # The cloudsim CAL is local-only: there are no remote credentials to
+        # check, so validation always reports success.
+        status = RwcalYang.CloudConnectionStatus(
+                status="success",
+                details=""
+                )
+
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        """Returns the management network
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            a NetworkInfo object
+
+        """
+        return self.lxc.mgmt_network
+
+    # Tenant and role management are not supported by the local cloudsim
+    # CAL; the stubs below raise NotImplementedError.  NOTE(review): the
+    # active rwstatus exception map in this module does not include
+    # NotImplementedError, so confirm how these failures are reported.
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        """
+        Create a new tenant.
+
+        @param name     - name to assign to the tenant.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """
+        delete a tenant.
+
+        @param tenant_id     - id of tenant to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """
+        List tenants.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        """
+        Create a new role.
+
+        @param name         - name to assign to the role.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """
+        delete a role.
+
+        @param role_id     - id of role to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """
+        List roles.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        """Create a new image
+
+        Creates a new container based upon the template and tarfile specified.
+        Only one image is currently supported for a given instance of the CAL.
+
+        Arguments:
+            account - a cloud account
+            image   - an ImageInfo object
+
+        Raises:
+            An RWErrorDuplicate is raised if create_image is called and there
+            is already an image.
+
+        Returns:
+            The UUID of the new image
+
+        """
+        # NOTE(review): the docstring mentions RWErrorDuplicate, but this
+        # implementation does not enforce a single image -- confirm intent.
+        def file_md5(path, block_size=2 ** 20):
+            """
+            Block size directly depends on the block size of your filesystem
+            to avoid performances issues.
+            """
+            md5 = hashlib.md5()
+            with open(path, 'rb') as f:
+                for chunk in iter(lambda: f.read(block_size), b''):
+                    md5.update(chunk)
+
+            return md5.hexdigest()
+
+        # Container names are generated sequentially: rwm0, rwm1, ...
+        current_images = self.cal.get_image_list()
+        lxc_name = "rwm{}".format(len(current_images))
+
+        if not image.has_field("disk_format"):
+            logger.warning("Image disk format not provided assuming qcow2")
+            image.disk_format = "qcow2"
+
+        if image.disk_format not in ["qcow2"]:
+            msg = "Only qcow2 currently supported for container CAL"
+            raise exceptions.RWErrorNotSupported(msg)
+
+        logger.debug('Calculating IMAGE checksum...')
+        image.checksum = file_md5(image.location)
+        logger.debug("Calculated image checksum: %s", image.checksum)
+        image.state = 'active'
+
+        # Build the backing container from the image's qcow2 root filesystem.
+        container = lxc.create_container(
+                name=lxc_name,
+                template_path=os.path.join(
+                        os.environ['RIFT_INSTALL'],
+                        "etc/lxc-fedora-rift.lxctemplate",
+                        ),
+                volume="rift",
+                rootfs_qcow2file=image.location,
+                )
+
+
+        # Add the images to the managers
+        cal_image_id = self.cal.add_image(image)
+        lxc_image_id = self.lxc.add_container(container)
+
+        # Create the CAL to LXC mapping
+        self.cal_to_lxc["image"][cal_image_id] = lxc_image_id
+
+        image.id = cal_image_id
+
+        return image.id
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """Deletes an image
+
+        This function will remove the record of the image from the CAL and
+        destroy the associated container.
+
+        Arguments:
+            account  - a cloud account
+            image_id - the UUID of the image to delete
+
+        Raises:
+            An RWErrorNotEmpty exception is raised if there are VMs based on
+            this image (the VMs need to be deleted first). An RWErrorNotFound
+            is raised if the image_id does not match any of the known images.
+
+        """
+        # An unknown image_id raises KeyError here, which the rwstatus
+        # decorator maps to a NOTFOUND status.
+        container_id = self.cal_to_lxc["image"][image_id]
+        container = self.lxc.get_container(container_id)
+
+        # Stop the image and destroy it (NB: it should not be necessary to stop
+        # the container, but just in case)
+        container.stop()
+        container.destroy()
+
+        self.cal.remove_image(image_id)
+        self.lxc.remove_container(container_id)
+
+        # NOTE(review): the cal_to_lxc["image"] entry is never removed here;
+        # confirm whether leaving the stale mapping is intentional.
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        """Returns the specified image
+
+        Arguments:
+            account  - a cloud account
+            image_id - the UUID of the image to retrieve
+
+        Raises:
+            An RWErrorNotFound exception is raised if the image_id does not
+            match any of the known images.
+
+        Returns:
+            An image object
+
+        """
+        return self.cal.get_image(image_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """Returns a list of images"""
+        resources = RwcalYang.VimResources()
+        # Each image is copied, presumably so callers cannot mutate the
+        # stored objects -- confirm against rwcal_copy_object usage.
+        for image in self.cal.get_image_list():
+            resources.imageinfo_list.append(rwcal_copy_object(image))
+
+        return resources
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        """Create a VM
+
+        Arguments:
+            vm - the VM info used to define the desire VM
+
+        Raises:
+            A KeyError (mapped to NOTFOUND by the rwstatus decorator) is
+            raised if the backing image for the VM cannot be found.
+
+        Returns:
+            a string containing the unique id of the created VM
+
+        """
+        # Retrieve the container that will be used as the base of the snapshot
+        container_id = self.cal_to_lxc["image"][vm.image_id]
+        container = self.lxc.get_container(container_id)
+
+        # Create a container snapshot
+        snapshot = container.snapshot(self.next_snapshot_name())
+        snapshot.hostname = vm.vm_name
+
+        # Register the vm and container
+        snapshot_id = self.lxc.add_container(snapshot)
+        vm.vm_id = self.cal.add_vm(vm.image_id, vm)
+
+        self.cal_to_lxc["vm"][vm.vm_id] = snapshot_id
+
+        return vm.vm_id
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """Starts the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to start
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        if vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        container_id = self.cal_to_lxc["vm"][vm_id]
+
+        snapshot = self.lxc.get_container(container_id)
+        port_ids = self.lxc.get_container_ports(container_id)
+
+        config = lxc.ContainerConfig(snapshot.name)
+
+        # Attach every registered data-plane port to the container config.
+        for port_id in port_ids:
+            port = self.lxc.get_port(port_id)
+            config.add_network_config(port)
+
+        vm = self.cal.get_vm(vm_id)
+
+        # Set the management IP on the vm if not yet set
+        if not vm.has_field("management_ip"):
+            mgmt_ip = self.lxc.acquire_mgmt_ip()
+            vm.management_ip = mgmt_ip
+
+        # Add the management interface
+        config.add_network_config(
+                lxc.NetworkConfig(
+                    type="veth",
+                    link=self.lxc.mgmt_network.network_name,
+                    name="eth0",
+                    ipv4=vm.management_ip,
+                    ipv4_gateway='auto',
+                    )
+                )
+
+        # Add rift root as a mount point
+        config.add_mount_point_config(
+            lxc.MountConfig(
+                local=os.environ["RIFT_ROOT"],
+                remote=os.environ["RIFT_ROOT"][1:],
+                read_only=False,
+                )
+            )
+
+        # NOTE(review): assumes vm.cloud_init is always accessible; confirm
+        # the container field cannot be unset on VM info objects.
+        userdata=None
+        if vm.cloud_init.has_field("userdata"):
+            userdata = vm.cloud_init.userdata
+
+        snapshot.configure(config, userdata=userdata)
+        # For some reason, the cloud-init fails or runs only partially when
+        # you start the container immediately after writing the config files.
+        # A sleep of 1 sec seems to magically fix the issue!!
+        time.sleep(1)
+        snapshot.start()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """Stops the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to stop
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        if vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        # Stop the container
+        container_id = self.cal_to_lxc["vm"][vm_id]
+        snapshot = self.lxc.get_container(container_id)
+        snapshot.stop()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """Deletes the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        if vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        container_id = self.cal_to_lxc["vm"][vm_id]
+
+        # Stop the backing snapshot before destroying it.
+        snapshot = self.lxc.get_container(container_id)
+        snapshot.stop()
+        snapshot.destroy()
+
+        self.cal.remove_vm(vm_id)
+        self.lxc.remove_container(container_id)
+
+        # NOTE(review): the cal_to_lxc["vm"] entry is not removed here;
+        # confirm whether leaving the stale mapping is intentional.
+        # TODO: Recycle management ip
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """
+        reboot a virtual machine.
+
+        @param vm_id     - Instance id of VM to be rebooted.
+        """
+        # no_rwstatus=True presumably makes the decorated methods raise
+        # rather than return a status, so a single status is reported by
+        # this decorator -- confirm against the rw_status implementation.
+        self.do_stop_vm(account, vm_id, no_rwstatus=True)
+        self.do_start_vm(account, vm_id, no_rwstatus=True)
+
+    @rwstatus
+    def do_get_vm(self, account, vm_id):
+        """Returns the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to return
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        Returns:
+            a VMInfoItem object
+
+        """
+        if vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        return self.cal.get_vm(vm_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        """Returns the a list of the VMs known to the driver
+
+        Returns:
+            a list of VMInfoItem objects
+
+        """
+        resources = RwcalYang.VimResources()
+        for vm in self.cal.get_vm_list():
+            resources.vminfo_list.append(rwcal_copy_object(vm))
+
+        return resources
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        """
+        create new flavor.
+
+        @param flavor   - Flavor object
+        """
+        # Flavors are stored directly in the shared CalManager registry.
+        flavor_id = str(uuid.uuid4())
+        flavor.id = flavor_id
+        self.cal.flavors[flavor_id] = flavor
+        logger.debug('Created flavor: {}'.format(flavor_id))
+        return flavor_id
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """
+        Delete flavor.
+
+        @param flavor_id     - Flavor id to be deleted.
+        """
+        logger.debug('Deleted flavor: {}'.format(flavor_id))
+        # pop() raises KeyError for unknown ids, which the rwstatus
+        # decorator maps to a NOTFOUND status.
+        self.cal.flavors.pop(flavor_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        """
+        Return the specified flavor
+
+        @param flavor_id - the id of the flavor to return
+        """
+        flavor = self.cal.flavors[flavor_id]
+        logger.debug('Returning flavor-info for : {}'.format(flavor_id))
+        return flavor
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """
+        Return a list of flavors
+        """
+        vim_resources = RwcalYang.VimResources()
+        for flavor in self.cal.flavors.values():
+            f = RwcalYang.FlavorInfoItem()
+            f.copy_from(flavor)
+            vim_resources.flavorinfo_list.append(f)
+        logger.debug("Returning list of flavor-info of size: %d", len(vim_resources.flavorinfo_list))
+        return vim_resources
+
+    # Host management is not supported by the local cloudsim CAL.
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        """Create a port between a network and a virtual machine
+
+        Arguments:
+            account - a cloud account
+            port    - a description of port to create
+
+        Raises:
+            Raises an RWErrorNotFound exception if either the network or the VM
+            associated with the port cannot be found.
+
+        Returns:
+            the ID of the newly created port.
+
+        """
+        if port.network_id not in self.cal_to_lxc["network"]:
+            msg = 'Unable to find the specified network ({})'
+            raise exceptions.RWErrorNotFound(msg.format(port.network_id))
+
+        if port.vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(port.vm_id))
+
+        if port.has_field("ip_address"):
+            raise exceptions.RWErrorFailure("IP address of the port must not be specific")
+
+        network = self.cal.get_network(port.network_id)
+        ip_pool = self.cal.get_network_ip_pool(port.network_id)
+        port.ip_address = ip_pool.allocate_ip()
+
+        net_config = lxc.NetworkConfig(
+                type='veth',
+                link=network.network_name[:15],
+                name="veth" + str(uuid.uuid4())[:10],
+                ipv4=port.ip_address,
+                )
+
+        lxc_network_id = self.cal_to_lxc["network"][port.network_id]
+        lxc_vm_id = self.cal_to_lxc["vm"][port.vm_id]
+
+        cal_port_id = self.cal.add_port(port.network_id, port.vm_id, port)
+        lxc_port_id = self.lxc.add_port(lxc_network_id, lxc_vm_id, net_config)
+
+        self.cal_to_lxc["port"][cal_port_id] = lxc_port_id
+        port.port_id = cal_port_id
+
+        return port.port_id
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        """Delete the specified port
+
+        Arguments:
+            account - a cloud account
+            port_id - the ID of the port to delete
+
+        Raises:
+            A RWErrorNotFound exception is raised if the specified port cannot
+            be found.
+
+        """
+        if port_id not in self.cal_to_lxc["port"]:
+            msg = "Unable to find the specified port ({})"
+            raise exceptions.RWErrorNotFound(msg.format(port_id))
+
+        lxc_port_id = self.cal_to_lxc["port"][port_id]
+
+        # Release the port's ip address back into the network pool
+        port = self.cal.get_port(port_id)
+        ip_pool = self.cal.get_network_ip_pool(port.network_id)
+        ip_pool.deallocate_ip(port.ip_address)
+
+        self.cal.remove_port(port_id)
+        self.lxc.remove_port(lxc_port_id)
+
+        # Unlike images and vms, the CAL-to-LXC mapping is cleaned up here.
+        del self.cal_to_lxc["port"][port_id]
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        """Return the specified port
+
+        Arguments:
+            account - a cloud account
+            port_id - the ID of the port to return
+
+        Raises:
+            A RWErrorNotFound exception is raised if the specified port cannot
+            be found.
+
+        Returns:
+            The specified port.
+
+        """
+        # Existence is checked against the CAL-to-LXC map before lookup.
+        if port_id not in self.cal_to_lxc["port"]:
+            msg = "Unable to find the specified port ({})"
+            raise exceptions.RWErrorNotFound(msg.format(port_id))
+
+        return self.cal.get_port(port_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        """Returns a list of ports"""
+        resources = RwcalYang.VimResources()
+        for port in self.datastore.cal_manager.get_port_list():
+            resources.portinfo_list.append(rwcal_copy_object(port))
+
+        return resources
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        """Create a network
+
+        Arguments:
+            account - a cloud account
+            network - a description of the network to create
+
+        Returns:
+            The ID of the newly created network
+
+        """
+
+        # Create the network
+        try:
+            # Setup a pool of mgmt IPv4 addresses
+            if net.bridge_exists(network.network_name):
+                logger.warning("Bridge %s already exists.  Removing.", network.network_name)
+                net.bridge_down(network.network_name)
+                net.bridge_remove(network.network_name)
+
+            # Ensure that the subnet field was filled out and is valid
+            if not network.has_field("subnet"):
+                raise CreateNetworkError("subnet not provided in create network request")
+
+            try:
+                ipaddress.IPv4Network(network.subnet)
+            except ValueError as e:
+                raise CreateNetworkError("Could not convert subnet into a "
+                                         "IPv4Network: %s" % str(network.subnet))
+
+            ip_pool = NetworkIPPool(network.subnet)
+
+            # Create the management bridge with interface information
+            net.create(network.network_name)
+
+        except Exception as e:
+            logger.warning(str(e))
+
+        # Register the network
+        cal_network_id = self.cal.add_network(network)
+        lxc_network_id = self.lxc.add_bridge(network)
+        self.cal.add_network_ip_pool(cal_network_id, ip_pool)
+
+        self.cal_to_lxc["network"][cal_network_id] = lxc_network_id
+
+        # Set the ID of the network object
+        network.network_id = cal_network_id
+
+        return network.network_id
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        """
+        Arguments:
+            account    - a cloud account
+            network_id - the UUID of the network to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified network cannot be
+            found.
+
+        """
+        if network_id not in self.cal_to_lxc["network"]:
+            msg = "Unable to find the specified network ({})"
+            raise exceptions.RWErrorNotFound(msg.format(network_id))
+
+        # Get the associated bridge ID
+        bridge_id = self.cal_to_lxc["network"][network_id]
+
+        # Delete the network
+        network = self.cal.get_network(network_id)
+        net.delete(network.network_name)
+
+        # Remove the network records
+        self.lxc.remove_bridge(bridge_id)
+        self.cal.remove_network(network_id)
+        del self.cal_to_lxc["network"][network_id]
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        """Returns the specified network
+
+        Arguments:
+            account    - a cloud account
+            network_id - the UUID of the network to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified network cannot be
+            found.
+
+        Returns:
+            The specified network
+
+        """
+        return self.cal.get_network(network_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        """Returns a list of network objects"""
+        resources = RwcalYang.VimResources()
+        for network in self.cal.get_network_list():
+            resources.networkinfo_list.append(rwcal_copy_object(network))
+
+        return resources
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the virtual link to create
+                          (name, subnet, optional provider_network)
+
+        Returns:
+            The ID of the network backing the new virtual link
+        """
+        # A virtual link is realized as a plain CAL network here.
+        network = RwcalYang.NetworkInfoItem()
+        network.network_name = link_params.name
+        network.subnet = link_params.subnet
+
+        if link_params.has_field("provider_network"):
+            logger.warning("Container CAL does not implement provider network")
+
+        # do_create_network is rwstatus-wrapped, so unpack (status, id) and
+        # convert a failure status into an exception for the rwcalstatus shim.
+        rs, net_id = self.do_create_network(account, network)
+        if rs != RwTypes.RwStatus.SUCCESS:
+            raise exceptions.RWErrorFailure(rs)
+
+        return net_id
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        """Delete a virtual link
+
+        Arguments:
+            account - a cloud account
+            link_id - id for the virtual-link to be deleted
+
+        Returns:
+            None
+        """
+
+        # Remove every port on the backing network first so the network
+        # deletion below does not leave dangling port records behind.
+        network_ports = self.cal.get_network_ports(link_id)
+        for port_id in network_ports:
+            self.do_delete_port(account, port_id, no_rwstatus=True)
+
+        self.do_delete_network(account, link_id, no_rwstatus=True)
+
+    @staticmethod
+    def fill_connection_point_info(c_point, port_info):
+        """Populate a connection-point GI object from port information
+
+        Copies port information produced by the container cal driver into the
+        supplied Protobuf Gi object (c_point is mutated in place).
+
+        Arguments:
+            c_point   - the RwcalYang.VDUInfoParams_ConnectionPoints object
+                        to fill
+            port_info - Port information from container cal
+        """
+        c_point.name = port_info.port_name
+        c_point.connection_point_id = port_info.port_id
+        c_point.ip_address = port_info.ip_address
+        # Ports tracked by this CAL are always reported active.
+        c_point.state = 'active'
+        c_point.virtual_link_id = port_info.network_id
+        c_point.vdu_id = port_info.vm_id
+
+    @staticmethod
+    def create_virtual_link_info(network_info, port_list):
+        """Create a GI object for VirtualLinkInfoParams
+
+        Converts Network and Port information dictionary object
+        returned by container manager into Protobuf Gi Object
+
+        Arguments:
+            network_info - Network information from container cal
+            port_list - A list of port information from container cal
+        Returns:
+            Protobuf Gi object for VirtualLinkInfoParams
+        """
+        link = RwcalYang.VirtualLinkInfoParams()
+        link.name = network_info.network_name
+        # Links tracked by this CAL are always reported active.
+        link.state = 'active'
+        link.virtual_link_id = network_info.network_id
+        # Attach one connection point per port on the network.
+        for port in port_list:
+            c_point = link.connection_points.add()
+            CloudSimPlugin.fill_connection_point_info(c_point, port)
+
+        link.subnet = network_info.subnet
+
+        return link
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        """Get information about virtual link.
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+
+        Returns:
+            Object of type RwcalYang.VirtualLinkInfoParams
+        """
+
+        network = self.do_get_network(account, link_id, no_rwstatus=True)
+        port_ids = self.cal.get_network_ports(network.network_id)
+        ports = [self.cal.get_port(p_id) for p_id in port_ids]
+
+        virtual_link = CloudSimPlugin.create_virtual_link_info(
+                network, ports
+                )
+
+        return virtual_link
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link_list(self, account):
+        """Get information about all the virtual links
+
+        Arguments:
+            account  - a cloud account
+
+        Returns:
+            A RwcalYang.VNFResources with one virtual_link_info_list entry
+            per known network
+        """
+        networks = self.do_get_network_list(account, no_rwstatus=True)
+        vnf_resources = RwcalYang.VNFResources()
+        for network in networks.networkinfo_list:
+            virtual_link = self.do_get_virtual_link(account, network.network_id, no_rwstatus=True)
+            vnf_resources.virtual_link_info_list.append(virtual_link)
+
+        return vnf_resources
+
+    def _create_connection_point(self, account, c_point, vdu_id):
+        """
+        Create a connection point
+        Arguments:
+           account  - a cloud account
+           c_point  - connection point request (name and virtual_link_id)
+           vdu_id   - ID of the VDU the new port is attached to
+        Returns:
+           The ID of the newly created port
+        """
+        port = RwcalYang.PortInfoItem()
+        port.port_name = c_point.name
+        port.network_id = c_point.virtual_link_id
+        port.port_type = 'normal' ### Find Port type from network_profile under cloud account
+        port.vm_id = vdu_id
+        port_id = self.do_create_port(account, port, no_rwstatus=True)
+        return port_id
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        """Create a new virtual deployment unit
+
+        Arguments:
+            account     - a cloud account
+            vdu_init  - information about VDU to create (RwcalYang.VDUInitParams)
+
+        Returns:
+            The vdu_id (the ID of the backing VM)
+        """
+        ### Create VM
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = vdu_init.name
+        vm.image_id = vdu_init.image_id
+        # NOTE(review): the nested access 'vdu_init.vdu_init' looks suspicious
+        # -- confirm VDUInitParams really carries a 'vdu_init' sub-message
+        # rather than holding 'userdata' directly.
+        if vdu_init.vdu_init.has_field('userdata'):
+            vm.cloud_init.userdata = vdu_init.vdu_init.userdata
+        vm.user_tags.node_id = vdu_init.node_id
+
+        vm_id = self.do_create_vm(account, vm, no_rwstatus=True)
+
+        ### Now create required number of ports aka connection points
+        port_list = []
+        for c_point in vdu_init.connection_points:
+            virtual_link_id = c_point.virtual_link_id
+
+            # Attempt to fetch the network to verify that the network
+            # already exists.
+            self.do_get_network(account, virtual_link_id, no_rwstatus=True)
+
+            port_id = self._create_connection_point(account, c_point, vm_id)
+            port_list.append(port_id)
+
+        # Finally start the vm
+        self.do_start_vm(account, vm_id, no_rwstatus=True)
+
+        return vm_id
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Modify Properties of existing virtual deployment unit
+
+        Arguments:
+            account     -  a cloud account
+            vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
+        """
+        ### First create required number of ports aka connection points
+        port_list = []
+        network_list = []
+        if not vdu_modify.has_field("vdu_id"):
+            raise ValueError("vdu_id must not be empty")
+
+        for c_point in vdu_modify.connection_points_add:
+            if not c_point.has_field("virtual_link_id"):
+                raise ValueError("virtual link id not provided")
+
+            network_list.append(c_point.virtual_link_id)
+            port_id = self._create_connection_point(account, c_point, vdu_modify.vdu_id)
+            port_list.append(port_id)
+
+        ### Delete the requested connection_points
+        for c_point in vdu_modify.connection_points_remove:
+            self.do_delete_port(account, c_point.connection_point_id, no_rwstatus=True)
+
+        self.do_reboot_vm(account, vdu_modify.vdu_id)
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        """Delete a virtual deployment unit
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu to be deleted
+
+        Returns:
+            None
+        """
+        ### Get list of port on VM and delete them.
+        port_id_list = self.cal.get_vm_ports(vdu_id)
+        ports = [self.cal.get_port(p_id) for p_id in port_id_list]
+        for port in ports:
+            self.do_delete_port(account, port.port_id, no_rwstatus=True)
+        self.do_delete_vm(account, vdu_id, no_rwstatus=True)
+
+    @staticmethod
+    def fill_vdu_info(vm_info, port_list):
+        """create a gi object for vduinfoparams
+
+        converts vm information returned by the container cal driver into a
+        protobuf gi object
+
+        arguments:
+            vm_info - vm information from the container cal
+            port_list - a list of port information from container cal
+        returns:
+            protobuf gi object for vduinfoparams
+        """
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.name = vm_info.vm_name
+        vdu.vdu_id = vm_info.vm_id
+        vdu.management_ip = vm_info.management_ip
+        # No floating IPs in the simulator: public IP == management IP.
+        vdu.public_ip = vm_info.management_ip
+        vdu.node_id = vm_info.user_tags.node_id
+        vdu.image_id = vm_info.image_id
+        vdu.state = 'active'
+
+        # fill the port information
+        for port in port_list:
+            c_point = vdu.connection_points.add()
+            CloudSimPlugin.fill_connection_point_info(c_point, port)
+
+        # Containers have no real flavor; report a fixed synthetic one.
+        vdu.vm_flavor.vcpu_count = 1
+        vdu.vm_flavor.memory_mb = 8 * 1024 # 8GB
+        vdu.vm_flavor.storage_gb = 10
+
+        return vdu
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        """Get information about a virtual deployment unit.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu
+
+        Returns:
+            Object of type RwcalYang.VDUInfoParams
+        """
+        port_id_list = self.cal.get_vm_ports(vdu_id)
+        ports = [self.cal.get_port(p_id) for p_id in port_id_list]
+        vm_info = self.do_get_vm(account, vdu_id, no_rwstatus=True)
+        vdu_info = CloudSimPlugin.fill_vdu_info(vm_info, ports)
+
+        return vdu_info
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu_list(self, account):
+        """Get information about all the virtual deployment units
+
+        Arguments:
+            account     - a cloud account
+
+        Returns:
+            A RwcalYang.VNFResources with one VDUInfoParams per known VM in
+            vdu_info_list
+        """
+
+        vnf_resources = RwcalYang.VNFResources()
+
+        vm_resources = self.do_get_vm_list(account, no_rwstatus=True)
+        for vm in vm_resources.vminfo_list:
+            # port_list starts as a list of IDs and is replaced by the
+            # resolved port objects before being handed to fill_vdu_info.
+            port_list = self.cal.get_vm_ports(vm.vm_id)
+            port_list = [self.cal.get_port(port_id) for port_id in port_list]
+            vdu = CloudSimPlugin.fill_vdu_info(vm, port_list)
+            vnf_resources.vdu_info_list.append(vdu)
+
+        return vnf_resources
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/test/cloudsim_module_test.py b/rwcal/plugins/vala/rwcal_cloudsim/test/cloudsim_module_test.py
new file mode 100755
index 0000000..64837ad
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsim/test/cloudsim_module_test.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import logging
+import os
+import ipaddress
+import unittest
+import uuid
+import sys
+from gi.repository import RwcalYang
+
+import rift.rwcal.cloudsim.lvm as lvm
+import rift.rwcal.cloudsim.lxc as lxc
+
+sys.path.append('../')
+import rwcal_cloudsim
+
+
+logger = logging.getLogger('rwcal-cloudsim')
+
+
+class CloudsimTest(unittest.TestCase):
+    @classmethod
+    def cleanUp(cls):
+        for container in lxc.containers():
+            lxc.stop(container)
+
+        for container in lxc.containers():
+            lxc.destroy(container)
+
+        #lvm.destroy("rift")
+
+    @classmethod
+    def create_image(cls):
+        image = RwcalYang.ImageInfoItem()
+        image.name = "rift-lxc-image"
+        image.location = "/net/sharedfiles/home1/common/vm/R0.4/rift-mano-devel-latest.qcow2"
+        image.disk_format = "qcow2"
+        image.id = cls.cal.do_create_image(cls.account, image, no_rwstatus=True)
+
+        cls.image = image
+
+    @classmethod
+    def setUpClass(cls):
+        cls.cleanUp()
+
+        lvm.create("rift")
+        cls.account = RwcalYang.CloudAccount()
+        cls.cal = rwcal_cloudsim.CloudSimPlugin()
+        cls.create_image()
+
+    def setUp(self):
+        pass
+
+    def create_vm(self, image, index):
+        vm = RwcalYang.VmInfo()
+        vm.vm_name = 'rift-s{}'.format(index + 1)
+        vm.image_id = image.id
+        vm.user_tags.node_id = str(uuid.uuid4())
+
+        self.cal.do_create_vm(self.account, vm, no_rwstatus=True)
+
+        return vm
+
+    def create_virtual_link(self, index):
+        link = RwcalYang.VirtualLinkReqParams()
+        link.name = 'link-{}'.format(index + 1)
+        link.subnet = '192.168.{}.0/24'.format(index + 1)
+
+        logger.debug("Creating virtual link: %s", link)
+
+        link_id = self.cal.do_create_virtual_link(self.account, link, no_rwstatus=True)
+        return link, link_id
+
+    def create_vdu(self, image, index, virtual_link_ids=None):
+        vdu_init = RwcalYang.VDUInitParams()
+        vdu_init.name = 'rift-vdu{}'.format(index + 1)
+        vdu_init.node_id = str(uuid.uuid4())
+        vdu_init.image_id = image.id
+
+        if virtual_link_ids is not None:
+            for vl_id in virtual_link_ids:
+                cp = vdu_init.connection_points.add()
+                cp.name = "{}_{}".format(vdu_init.name, vl_id)
+                cp.virtual_link_id = vl_id
+
+        vdu_id = self.cal.do_create_vdu(self.account, vdu_init, no_rwstatus=True)
+
+        return vdu_init, vdu_id
+
+    def test_create_vm(self):
+        self.create_vm(self.image, 0)
+
+    def test_create_delete_virtual_link(self):
+        link, link_id = self.create_virtual_link(0)
+        get_link = self.cal.do_get_virtual_link(self.account, link_id, no_rwstatus=True)
+        assert get_link.name == link.name
+        assert get_link.virtual_link_id == link_id
+        assert len(get_link.connection_points) == 0
+        assert get_link.state == "active"
+
+        resources = self.cal.do_get_virtual_link_list(self.account, no_rwstatus=True)
+        assert len(resources.virtual_link_info_list) == 1
+        assert resources.virtual_link_info_list[0] == get_link
+
+        self.cal.do_delete_virtual_link(self.account, link_id, no_rwstatus=True)
+        resources = self.cal.do_get_virtual_link_list(self.account, no_rwstatus=True)
+        assert len(resources.virtual_link_info_list) == 0
+
+    def test_create_delete_vdu(self):
+        vdu, vdu_id = self.create_vdu(self.image, 0)
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+
+        assert get_vdu.image_id == self.image.id
+        assert get_vdu.name == vdu.name
+        assert get_vdu.node_id == vdu.node_id
+
+        assert len(get_vdu.connection_points) == 0
+
+        assert get_vdu.vm_flavor.vcpu_count >= 1
+        assert get_vdu.vm_flavor.memory_mb >= 8 * 1024
+        assert get_vdu.vm_flavor.storage_gb >= 5
+
+        resources = self.cal.do_get_vdu_list(self.account, no_rwstatus=True)
+        assert len(resources.vdu_info_list) == 1
+        assert resources.vdu_info_list[0] == get_vdu
+
+        resources = self.cal.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
+
+        resources = self.cal.do_get_vdu_list(self.account, no_rwstatus=True)
+        assert len(resources.vdu_info_list) == 0
+
+    def test_create_vdu_single_connection_point(self):
+        link, link_id = self.create_virtual_link(0)
+        vdu, vdu_id = self.create_vdu(self.image, 0, [link_id])
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 1
+        cp = get_vdu.connection_points[0]
+        assert (ipaddress.IPv4Address(cp.ip_address) in
+                ipaddress.IPv4Network(link.subnet))
+
+        get_link = self.cal.do_get_virtual_link(self.account, link_id, no_rwstatus=True)
+        assert len(get_link.connection_points) == 1
+        assert get_link.connection_points[0].vdu_id == vdu_id
+        assert get_link.connection_points[0].virtual_link_id == link_id
+
+        self.cal.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
+        get_link = self.cal.do_get_virtual_link(self.account, link_id, no_rwstatus=True)
+        assert len(get_link.connection_points) == 0
+
+        self.cal.do_delete_virtual_link(self.account, link_id)
+
+    def test_create_vdu_multiple_connection_point(self):
+        link1, link1_id = self.create_virtual_link(0)
+        link2, link2_id = self.create_virtual_link(1)
+        link3, link3_id = self.create_virtual_link(2)
+        link_id_map = {link1_id: link1, link2_id: link2, link3_id: link3}
+
+        vdu, vdu_id = self.create_vdu(self.image, 0, link_id_map.keys())
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 3
+        for cp in get_vdu.connection_points:
+            assert cp.virtual_link_id in link_id_map
+            link = link_id_map[cp.virtual_link_id]
+
+            assert (ipaddress.IPv4Address(cp.ip_address) in
+                    ipaddress.IPv4Network(link.subnet))
+
+        self.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
+
+        self.do_delete_virtual_link(self.account, link1_id, no_rwstatus=True)
+        self.do_delete_virtual_link(self.account, link2_id, no_rwstatus=True)
+        self.do_delete_virtual_link(self.account, link3_id, no_rwstatus=True)
+
+    def test_modify_vdu_add_remove_connection_point(self):
+        vdu, vdu_id = self.create_vdu(self.image, 0)
+        link, link_id = self.create_virtual_link(0)
+
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 0
+
+        modify_vdu = RwcalYang.VDUModifyParams()
+        modify_vdu.vdu_id = vdu_id
+        cp = modify_vdu.connection_points_add.add()
+        cp.virtual_link_id = link_id
+        cp.name = "link_1"
+        self.cal.do_modify_vdu(self.account, modify_vdu, no_rwstatus=True)
+
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 1
+
+        modify_vdu = RwcalYang.VDUModifyParams()
+        modify_vdu.vdu_id = vdu_id
+        cp = modify_vdu.connection_points_remove.add()
+        cp.connection_point_id = get_vdu.connection_points[0].connection_point_id
+        self.cal.do_modify_vdu(self.account, modify_vdu, no_rwstatus=True)
+
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 0
+
+        self.cal.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
+        self.cal.do_delete_virtual_link(self.account, link_id, no_rwstatus=True)
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
diff --git a/rwcal/plugins/vala/rwcal_cloudsimproxy/CMakeLists.txt b/rwcal/plugins/vala/rwcal_cloudsimproxy/CMakeLists.txt
new file mode 100644
index 0000000..66e0a3f
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsimproxy/CMakeLists.txt
@@ -0,0 +1,27 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+# Package identity for this plugin.  NOTE(review): nothing in this file
+# consumes these variables -- confirm rift_install_python_plugin picks them
+# up implicitly, otherwise they are dead configuration.
+set(PKG_NAME rwcal-cloudsimproxy)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+
+# Install the cloudsim proxy CAL plugin implementation.
+rift_install_python_plugin(rwcal_cloudsimproxy rwcal_cloudsimproxy.py)
+
diff --git a/rwcal/plugins/vala/rwcal_cloudsimproxy/Makefile b/rwcal/plugins/vala/rwcal_cloudsimproxy/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsimproxy/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# Include the Makefile.top that was found (make aborts here if none exists)
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_cloudsimproxy/rwcal_cloudsimproxy.py b/rwcal/plugins/vala/rwcal_cloudsimproxy/rwcal_cloudsimproxy.py
new file mode 100644
index 0000000..addb4d3
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_cloudsimproxy/rwcal_cloudsimproxy.py
@@ -0,0 +1,709 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+import os
+import shutil
+import tempfile
+
+import requests
+
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang,
+    )
+
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.cloudsimproxy')
+
+
+rwstatus_exception_map = { IndexError: RwTypes.RwStatus.NOTFOUND,
+                           KeyError: RwTypes.RwStatus.NOTFOUND,
+                           NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}
+
+rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
+rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
+
+
+class CloudsimProxyError(Exception):
+    """Raised for cloudsim-proxy specific failures, e.g. the proxy host
+    changing after it has already been pinned."""
+    pass
+
+
+class CloudSimProxyPlugin(GObject.Object, RwCal.Cloud):
+    DEFAULT_PROXY_HOST = "localhost"
+    DEFAULT_PROXY_PORT = 9002
+
+    def __init__(self):
+        # NOTE(review): the GObject.Object base __init__ is never invoked
+        # here -- confirm the GI bindings do not require it.
+        self._session = None   # requests.Session, created lazily (see session)
+        self._host = None      # proxy host; pinned after first account is seen
+        self._port = CloudSimProxyPlugin.DEFAULT_PROXY_PORT
+
+    @property
+    def session(self):
+        """The shared requests.Session, created on first access."""
+        if self._session is None:
+            self._session = requests.Session()
+
+        return self._session
+
+    @property
+    def host(self):
+        """The cloudsim proxy host (None until set from an account)."""
+        return self._host
+
+    @host.setter
+    def host(self, host):
+        if self._host is not None:
+            if host != self._host:
+                raise CloudsimProxyError("Cloudsim host changed during execution")
+
+        self._host = host
+
+    def _set_host_from_account(self, account):
+        # Derive (and pin) the proxy host from the account's cloudsim_proxy
+        # config; the host setter raises CloudsimProxyError on a change.
+        self.host = account.cloudsim_proxy.host
+
+    def _proxy_rpc_call(self, api, **kwargs):
+        """POST an RPC request to the cloudsim server and decode the reply.
+
+        Arguments:
+            api    - the endpoint name, appended to /api/ on the proxy host
+            kwargs - JSON-serializable parameters for the call
+
+        Raises:
+            requests.HTTPError on a non-2xx response.
+
+        Returns:
+            None (no return values), a single decoded value, or a tuple of
+            values -- see the note on the multi-value case below.
+        """
+        url = "http://{host}:{port}/api/{api}".format(
+                host=self._host,
+                port=self._port,
+                api=api,
+                )
+
+        post_dict = {}
+        for key, val in kwargs.items():
+            post_dict[key] = val
+
+        logger.debug("Sending post to url %s with json data: %s", url, post_dict)
+        r = self.session.post(url, json=post_dict)
+        r.raise_for_status()
+
+        response_dict = r.json()
+        logger.debug("Got json response: %s", response_dict)
+
+        return_vals = []
+        for return_val in response_dict["return_vals"]:
+            value = return_val["value"]
+            proto_type = return_val["proto_type"]
+            # Values tagged with a proto_type are rebuilt into the matching
+            # RwcalYang GI object; untagged values pass through as-is.
+            if proto_type is not None:
+                gi_cls = getattr(RwcalYang, proto_type)
+                logger.debug("Deserializing into %s", proto_type)
+                gi_obj = gi_cls.from_dict(value)
+                value = gi_obj
+
+            return_vals.append(value)
+
+        logger.debug("Returning RPC return values: %s", return_vals)
+
+        if len(return_vals) == 0:
+            return None
+
+        elif len(return_vals) == 1:
+            return return_vals[0]
+
+        else:
+            # NOTE(review): the first element is dropped in the multi-value
+            # case -- presumably a status callers do not want, but confirm
+            # against the cloudsim server's response contract.
+            return tuple(return_vals[1:])
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        """Hook the plugin logger into the RIFT logging infrastructure."""
+        logger.addHandler(
+            rwlogger.RwLogger(
+                category="rw-cal-log",
+                subcategory="cloudsimproxy",
+                log_hdl=rwlog_ctx,
+            )
+        )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        """Returns the management network
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            a NetworkInfo object
+
+        """
+        # Pin the proxy host from the account, then forward the call.
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_management_network")
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        """
+        Create a new tenant.  Not supported by the cloudsim proxy CAL.
+
+        @param name     - name to assign to the tenant.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """
+        Delete a tenant.  Not supported by the cloudsim proxy CAL.
+
+        @param tenant_id     - id of tenant to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """
+        List tenants.  Not supported by the cloudsim proxy CAL.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        """
+        Create a new role.  Not supported by the cloudsim proxy CAL.
+
+        @param name         - name to assign to the role.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """
+        Delete a role.  Not supported by the cloudsim proxy CAL.
+
+        @param role_id     - id of role to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """
+        List roles.  Not supported by the cloudsim proxy CAL.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        """Create a new image
+
+        Creates a new container based upon the template and tarfile specified.
+        Only one image is currently supported for a given instance of the CAL.
+
+        Arguments:
+            account - a cloud account
+            image   - an ImageInfo object
+
+        Raises:
+            An RWErrorDuplicate is raised if create_image is called and there
+            is already an image.
+
+        Returns:
+            The UUID of the new image
+
+        """
+        self._set_host_from_account(account)
+
+        if image.has_field("fileno"):
+            logger.debug("Got fileno for cloudsim image create")
+            new_fileno = os.dup(image.fileno)
+            read_hdl = os.fdopen(new_fileno, 'rb')
+            write_hdl = tempfile.NamedTemporaryFile()
+            image.location = write_hdl.name
+            logger.debug("Created temporary file to store the cloudsim image: %s", image.location)
+            shutil.copyfileobj(read_hdl, write_hdl)
+
+            image_dict = image.as_dict()
+            del image_dict["fileno"]
+        else:
+            image_dict = image.as_dict()
+
+        return self._proxy_rpc_call("create_image", image=image_dict)
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """Deletes an image
+
+        This function will remove the record of the image from the CAL and
+        destroy the associated container.
+
+        Arguments:
+            account  - a cloud account
+            image_id - the UUID of the image to delete
+
+        Raises:
+            An RWErrorNotEmpty exception is raised if there are VMs based on
+            this image (the VMs need to be deleted first). An RWErrorNotFound
+            is raised if the image_id does not match any of the known images.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_image", image_id=image_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        """Returns the specified image
+
+        Arguments:
+            account  - a cloud account
+            image_id - the UUID of the image to retrieve
+
+        Raises:
+            An RWErrorNotFound exception is raised if the image_id does not
+            match any of the known images.
+
+        Returns:
+            An image object
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_image", image_id=image_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """Returns a list of images"""
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_image_list")
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        """Create a VM
+
+        Arguments:
+            vm - the VM info used to define the desired VM
+
+        Raises:
+            An RWErrorFailure is raised if the VM cannot be created.
+
+        Returns:
+            a string containing the unique id of the created VM
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_vm", vm=vm.as_dict())
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """Starts the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to start
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("start_vm", vm_id=vm_id)
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """Stops the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to stop
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("stop_vm", vm_id=vm_id)
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """Deletes the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_vm", vm_id=vm_id)
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """
+        reboot a virtual machine.
+
+        @param vm_id     - Instance id of VM to be deleted.
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("reboot_vm", vm_id=vm_id)
+
+    @rwstatus
+    def do_get_vm(self, account, vm_id):
+        """Returns the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to return
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        Returns:
+            a VMInfoItem object
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_vm", vm_id=vm_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        """Returns a list of the VMs known to the driver
+
+        Returns:
+            a list of VMInfoItem objects
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_vm_list")
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        """
+        create new flavor.
+
+        @param flavor   - Flavor object
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_flavor", flavor=flavor.as_dict())
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """
+        Delete flavor.
+
+        @param flavor_id     - Flavor id to be deleted.
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_flavor", flavor_id=flavor_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        """
+        Return the specified flavor
+
+        @param flavor_id - the id of the flavor to return
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_flavor", flavor_id=flavor_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """
+        Return a list of flavors
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_flavor_list")
+
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        """Create a port between a network and a virtual machine
+
+        Arguments:
+            account - a cloud account
+            port    - a description of port to create
+
+        Raises:
+            Raises an RWErrorNotFound exception if either the network or the VM
+            associated with the port cannot be found.
+
+        Returns:
+            the ID of the newly created port.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_port", port=port.as_dict())
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        """Delete the specified port
+
+        Arguments:
+            account - a cloud account
+            port_id - the ID of the port to delete
+
+        Raises:
+            A RWErrorNotFound exception is raised if the specified port cannot
+            be found.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_port", port_id=port_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        """Return the specified port
+
+        Arguments:
+            account - a cloud account
+            port_id - the ID of the port to return
+
+        Raises:
+            A RWErrorNotFound exception is raised if the specified port cannot
+            be found.
+
+        Returns:
+            The specified port.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_port", port_id=port_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        """Returns a list of ports"""
+
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_port_list")
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        """Create a network
+
+        Arguments:
+            account - a cloud account
+            network - a description of the network to create
+
+        Returns:
+            The ID of the newly created network
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_network", network=network.as_dict())
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        """
+        Arguments:
+            account    - a cloud account
+            network_id - the UUID of the network to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified network cannot be
+            found.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_network", network_id=network_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        """Returns the specified network
+
+        Arguments:
+            account    - a cloud account
+            network_id - the UUID of the network to retrieve
+
+        Raises:
+            An RWErrorNotFound is raised if the specified network cannot be
+            found.
+
+        Returns:
+            The specified network
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_network", network_id=network_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        """Returns a list of network objects"""
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_network_list")
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If creds are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        self._set_host_from_account(account)
+
+        status = RwcalYang.CloudConnectionStatus()
+        try:
+            self._proxy_rpc_call("get_vm_list")
+        except Exception as e:
+            status.status = "failure"
+            status.details = "connection to cloudsim server failed: %s" % str(e)
+        else:
+            status.status = "success"
+            status.details = "Connection was successful"
+
+        return status
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the type of VDU to create
+
+        Returns:
+            The vdu_id
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_virtual_link", link_params=link_params.as_dict())
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        """Get information about virtual link.
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+
+        Returns:
+            Object of type RwcalYang.VirtualLinkInfoParams
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_virtual_link", link_id=link_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_virtual_link_list(self, account):
+        """Returns a list of the virtual links
+
+        Returns:
+            a list of RwcalYang.VirtualLinkInfoParams objects
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_virtual_link_list")
+
+    @rwstatus(ret_on_failure=[None])
+    def do_delete_virtual_link(self, account, link_id):
+        """Delete the virtual link
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_virtual_link", link_id=link_id)
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the type of VDU to create
+
+        Returns:
+            The vdu_id
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_virtual_link", link_params=link_params.as_dict())
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        """Create a new virtual deployment unit
+
+        Arguments:
+            account     - a cloud account
+            vdu_init  - information about VDU to create (RwcalYang.VDUInitParams)
+
+        Returns:
+            The vdu_id
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_vdu", vdu_params=vdu_init.as_dict())
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Modify Properties of existing virtual deployment unit
+
+        Arguments:
+            account     -  a cloud account
+            vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("modify_vdu", vdu_params=vdu_modify.as_dict())
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        """Delete a virtual deployment unit
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu to be deleted
+
+        Returns:
+            None
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_vdu", vdu_id=vdu_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        """Get information about a virtual deployment unit.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu
+
+        Returns:
+            Object of type RwcalYang.VDUInfoParams
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_vdu", vdu_id=vdu_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vdu_list(self, account):
+        """Get information about all the virtual deployment units
+
+        Arguments:
+            account     - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VDUInfoParams
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_vdu_list")
diff --git a/rwcal/plugins/vala/rwcal_mock/CMakeLists.txt b/rwcal/plugins/vala/rwcal_mock/CMakeLists.txt
new file mode 100644
index 0000000..1edf187
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_mock/CMakeLists.txt
@@ -0,0 +1,27 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+### rwcal-mock package
+set(PKG_NAME rwcal-mock)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+
+rift_install_python_plugin(rwcal_mock rwcal_mock.py)
diff --git a/rwcal/plugins/vala/rwcal_mock/Makefile b/rwcal/plugins/vala/rwcal_mock/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_mock/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upwards(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_mock/rwcal_mock.py b/rwcal/plugins/vala/rwcal_mock/rwcal_mock.py
new file mode 100644
index 0000000..a1776d1
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_mock/rwcal_mock.py
@@ -0,0 +1,616 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import random
+import socket
+import struct
+import collections
+import hashlib
+import logging
+import os
+import uuid
+
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.mock')
+
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+class ImageLocationError(Exception):
+    pass
+
+
+rwstatus_exception_map = { IndexError: RwTypes.RwStatus.NOTFOUND,
+                           KeyError: RwTypes.RwStatus.NOTFOUND,
+                           NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,
+                           UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+                           MissingFileError: RwTypes.RwStatus.NOTFOUND,
+}
+
+rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
+rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
+
+class Resources(object):
+    def __init__(self):
+        self.images = dict()
+        self.vlinks = dict()
+        self.vdus  = dict()
+        self.flavors = dict()
+
+class MockPlugin(GObject.Object, RwCal.Cloud):
+    """This class implements the abstract methods in the Cloud class.
+    Mock is used for unit testing."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self.resources = collections.defaultdict(Resources)
+
+    @staticmethod
+    def get_uuid(name):
+        if name is None:
+            raise ValueError("Name can not be None")
+        return str(uuid.uuid3(uuid.NAMESPACE_DNS, name))
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="rwcal.mock",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+        account = RwcalYang.CloudAccount()
+        account.name = 'mock_account'
+        account.account_type = 'mock'
+        account.mock.username = 'mock_user'
+        self.create_default_resources(account)
+        account.name = 'mock_account1'
+        self.create_default_resources(account)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If creds are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus(
+                status="success",
+                details=""
+                )
+
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        """
+        Returns the management network
+
+        @param account - a cloud account
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        """
+        Create a new tenant.
+
+        @param name     - name to assign to the tenant.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """
+        delete a tenant.
+
+        @param tenant_id     - id of tenant to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """
+        List tenants.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        """
+        Create a new role.
+
+        @param name         - name to assign to the role.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """
+        delete a role.
+
+        @param role_id     - id of role to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """
+        List roles.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        """
+        Create a VM image
+
+        @param account - cloud account information
+        @param image   - information about the image
+        """
+        if image.location is None:
+            raise ImageLocationError("uninitialized image location")
+
+        if not os.path.exists(image.location):
+            raise MissingFileError("{} does not exist".format(image.location))
+
+        image.id = self.get_uuid(image.name)
+
+        self.resources[account.name].images[image.id] = image
+        logger.debug('created image: {}'.format(image.id))
+        return image.id
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """
+        delete a vm image.
+
+        @param image_id     - Instance id of VM image to be deleted.
+        """
+        if account.name not in self.resources:
+            raise UnknownAccountError()
+
+        del self.resources[account.name].images[image_id]
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        return self.resources[account.name].images[image_id]
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """
+        Return a list of the names of all available images.
+        """
+        boxed_image_list = RwcalYang.VimResources()
+        for image in self.resources[account.name].images.values():
+            image_entry = RwcalYang.ImageInfoItem()
+            image_entry.id = image.id
+            image_entry.name = image.name
+            if image.has_field('checksum'):
+
+                image_entry.checksum = image.checksum
+            boxed_image_list.imageinfo_list.append(image_entry)
+
+        logger.debug("Image list for {}: {}".format(account.name, boxed_image_list.imageinfo_list))
+        return boxed_image_list
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        """
+        Create a new virtual machine.
+
+        @param name     - name to assign to the VM.  This does not have to be unique.
+        @param image    - name of image to load on the VM.
+        @param size     - name of the size of the VM to create.
+        @param location - name of the location to launch the VM in.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """
+        Start a virtual machine.
+
+        @param vm_id - id of VM to start
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """
+        Stop a virtual machine.
+
+        @param vm_id - id of VM to stop
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """
+        delete a virtual machine.
+
+        @param vm_id     - Instance id of VM to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """
+        reboot a virtual machine.
+
+        @param vm_id     - Instance id of VM to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        """
+        create new flavor.
+
+        @param flavor   - Flavor object
+        """
+        flavor_id = self.get_uuid(flavor.name)
+        self.resources[account.name].flavors[flavor_id] = flavor
+        logger.debug('Created flavor: {}'.format(flavor_id))
+        return flavor_id
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """
+        Delete flavor.
+
+        @param flavor_id     - Flavor id to be deleted.
+        """
+        logger.debug('Deleted flavor: {}'.format(flavor_id))
+        self.resources[account.name].flavors.pop(flavor_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        """
+        Return the specified flavor
+
+        @param flavor_id - the id of the flavor to return
+        """
+        flavor = self.resources[account.name].flavors[flavor_id]
+        logger.debug('Returning flavor-info for : {}'.format(flavor_id))
+        return flavor
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """
+        Return a list of flavors
+        """
+        vim_resources = RwcalYang.VimResources()
+        for flavor in self.resources[account.name].flavors.values():
+            f = RwcalYang.FlavorInfoItem()
+            f.copy_from(flavor)
+            vim_resources.flavorinfo_list.append(f)
+        logger.debug("Returning list of flavor-info of size: %d", len(vim_resources.flavorinfo_list))
+        return vim_resources
+
+
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        raise NotImplementedError()
+
+    def create_default_resources(self, account):
+        """
+        Create default resources
+        """
+        link_list = []
+        ### Add virtual links
+        #for i in range(1):
+        #    vlink = RwcalYang.VirtualLinkReqParams()
+        #    vlink.name = 'link-'+str(i)
+        #    vlink.subnet = '10.0.0.0/24'
+        #    rs, vlink_id = self.do_create_virtual_link(account, vlink)
+        #    assert vlink_id != ''
+        #    logger.debug("Creating static virtual-link with name: %s", vlink.name)
+        #    link_list.append(vlink_id)
+
+        #### Add VDUs
+        #for i in range(8):
+        #    vdu = RwcalYang.VDUInitParams()
+        #    vdu.name = 'vdu-'+str(i)
+        #    vdu.node_id = str(i)
+        #    vdu.image_id = self.get_uuid('image-'+str(i))
+        #    vdu.flavor_id = self.get_uuid('flavor'+str(i))
+        #    vdu.vm_flavor.vcpu_count = 4
+        #    vdu.vm_flavor.memory_mb = 4096*2
+        #    vdu.vm_flavor.storage_gb = 40
+        #    for j in range(2):
+        #        c = vdu.connection_points.add()
+        #        c.name = vdu.name+'-port-'+str(j)
+        #        c.virtual_link_id = link_list[j]
+        #    rs, vdu_id = self.do_create_vdu(account, vdu)
+        #    assert vdu_id != ''
+        #    logger.debug("Creating static VDU with name: %s", vdu.name)
+
+        for i in range(2):
+            flavor = RwcalYang.FlavorInfoItem()
+            flavor.name = 'flavor-'+str(i)
+            flavor.vm_flavor.vcpu_count = 4
+            flavor.vm_flavor.memory_mb = 4096*2
+            flavor.vm_flavor.storage_gb = 40
+            rc, flavor_id = self.do_create_flavor(account, flavor)
+
+        for i in range(2):
+            image = RwcalYang.ImageInfoItem()
+            image.name = "rwimage"
+            image.id = self.get_uuid('image-'+str(i))
+            image.checksum = self.get_uuid('rwimage'+str(i))
+            image.location = "/dev/null"
+            rc, image_id = self.do_create_image(account, image)
+
+        image = RwcalYang.ImageInfoItem()
+        image.name = "Fedora-x86_64-20-20131211.1-sda.qcow2"
+        image.id = self.get_uuid(image.name)
+        image.checksum = self.get_uuid(image.name)
+        image.location = "/dev/null"
+        rc, image_id = self.do_create_image(account, image)
+
+        image = RwcalYang.ImageInfoItem()
+        image.name = "Fedora-x86_64-20-20131211.1-sda-ping.qcow2"
+        image.id = self.get_uuid(image.name)
+        image.checksum = "a6ffaa77f949a9e4ebb082c6147187cf"#self.get_uuid(image.name)
+        image.location = "/dev/null"
+        rc, image_id = self.do_create_image(account, image)
+
+        image = RwcalYang.ImageInfoItem()
+        image.name = "Fedora-x86_64-20-20131211.1-sda-pong.qcow2"
+        image.id = self.get_uuid(image.name)
+        image.checksum = "977484d95575f80ef8399c9cf1d45ebd"#self.get_uuid(image.name)
+        image.location = "/dev/null"
+        rc, image_id = self.do_create_image(account, image)
+
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create an in-memory virtual link for the given account.
+
+        Arguments:
+            account     - cloud account whose resource store receives the link
+            link_params - requested link attributes (name, subnet,
+                          provider_network, ...)
+
+        Returns:
+            The generated virtual-link id (derived from the link name plus
+            the current number of links stored for this account).
+        """
+        vlink_id = self.get_uuid("%s_%s" % (link_params.name, len(self.resources[account.name].vlinks)))
+        vlink = RwcalYang.VirtualLinkInfoParams()
+        vlink.name = link_params.name
+        vlink.state = 'active'
+        vlink.virtual_link_id = vlink_id
+        vlink.subnet = link_params.subnet
+        vlink.connection_points = []
+        # Copy only the provider_network fields that were explicitly set on
+        # the request, one field at a time.
+        for field in link_params.provider_network.fields:
+            if link_params.provider_network.has_field(field):
+                setattr(vlink.provider_network, field, getattr(link_params.provider_network, field))
+
+        self.resources[account.name].vlinks[vlink_id] = vlink
+        logger.debug('created virtual-link: {}'.format(vlink_id))
+        return vlink_id
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        self.resources[account.name].vlinks.pop(link_id)
+        logger.debug('deleted virtual-link: {}'.format(link_id))
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        vlink = self.resources[account.name].vlinks[link_id]
+        logger.debug('Returning virtual-link-info for : {}'.format(link_id))
+        return vlink
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_virtual_link_list(self, account):
+        vnf_resources = RwcalYang.VNFResources()
+        for r in self.resources[account.name].vlinks.values():
+            vlink = RwcalYang.VirtualLinkInfoParams()
+            vlink.copy_from(r)
+            vnf_resources.virtual_link_info_list.append(vlink)
+        logger.debug("Returning list of virtual-link-info of size: %d", len(vnf_resources.virtual_link_info_list))
+        return vnf_resources
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        vdu_id = self.get_uuid("%s_%s" % (vdu_init.name, len(self.resources[account.name].vdus)))
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.vdu_id = vdu_id
+        vdu.name = vdu_init.name
+        vdu.node_id = vdu_init.node_id
+        vdu.image_id = vdu_init.image_id
+        if vdu_init.has_field('flavor_id'):
+            vdu.flavor_id = vdu_init.flavor_id
+
+        if vdu_init.has_field('vm_flavor'):
+            xx = vdu.vm_flavor.new()
+            xx.from_pbuf(vdu_init.vm_flavor.to_pbuf())
+            vdu.vm_flavor = xx
+
+        if vdu_init.has_field('guest_epa'):
+            xx = vdu.guest_epa.new()
+            xx.from_pbuf(vdu_init.guest_epa.to_pbuf())
+            vdu.guest_epa = xx
+
+        if vdu_init.has_field('vswitch_epa'):
+            xx = vdu.vswitch_epa.new()
+            xx.from_pbuf(vdu_init.vswitch_epa.to_pbuf())
+            vdu.vswitch_epa = xx
+
+        if vdu_init.has_field('hypervisor_epa'):
+            xx = vdu.hypervisor_epa.new()
+            xx.from_pbuf(vdu_init.hypervisor_epa.to_pbuf())
+            vdu.hypervisor_epa = xx
+
+        if vdu_init.has_field('host_epa'):
+            xx = vdu.host_epa.new()
+            xx.from_pbuf(vdu_init.host_epa.to_pbuf())
+            vdu.host_epa = xx
+
+        vdu.state = 'active'
+        vdu.management_ip = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
+        vdu.public_ip = vdu.management_ip
+
+        for c in vdu_init.connection_points:
+            p = vdu.connection_points.add()
+            p.connection_point_id = self.get_uuid(c.name)
+            p.name = c.name
+            p.vdu_id = vdu_id
+            p.state = 'active'
+            p.ip_address = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
+            p.virtual_link_id = c.virtual_link_id
+            # Need to add this connection_point to virtual link
+            vlink = self.resources[account.name].vlinks[c.virtual_link_id]
+            v = vlink.connection_points.add()
+            for field in p.fields:
+                if p.has_field(field):
+                    setattr(v, field, getattr(p, field))
+
+        self.resources[account.name].vdus[vdu_id] = vdu
+        logger.debug('Created vdu: {}'.format(vdu_id))
+        return vdu_id
+
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Apply connection-point additions/removals to an existing VDU.
+
+        Arguments:
+            account    - cloud account owning the VDU
+            vdu_modify - modify request carrying vdu_id plus the
+                         connection_points_add / connection_points_remove lists
+        """
+        vdu = self.resources[account.name].vdus[vdu_modify.vdu_id]
+        for c in vdu_modify.connection_points_add:
+            p = vdu.connection_points.add()
+            p.connection_point_id = self.get_uuid(c.name)
+            p.name = c.name
+            p.vdu_id = vdu.vdu_id
+            p.state = 'active'
+            # Random well-formed IPv4 address, mimicking a DHCP assignment.
+            p.ip_address = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
+            p.virtual_link_id = c.virtual_link_id
+            # Need to add this connection_point to virtual link
+            # NOTE(review): this mirrors the point into the link with an
+            # explicitly built VirtualLinkInfoParams_ConnectionPoints, while
+            # do_create_vdu copies fields generically — confirm both paths
+            # populate the same field set.
+            vlink = self.resources[account.name].vlinks[c.virtual_link_id]
+            aa = RwcalYang.VirtualLinkInfoParams_ConnectionPoints()
+            aa.connection_point_id = p.connection_point_id
+            aa.name = p.name
+            aa.virtual_link_id = vlink.virtual_link_id
+            aa.state = 'active'
+            aa.ip_address = p.ip_address
+            aa.vdu_id = p.vdu_id
+            vlink.connection_points.append(aa)
+
+        for c in vdu_modify.connection_points_remove:
+            # Drop the first matching point from the VDU itself; breaking
+            # immediately after remove() keeps the in-place mutation safe.
+            for d in vdu.connection_points:
+                if c.connection_point_id == d.connection_point_id:
+                    vdu.connection_points.remove(d)
+                    break
+            # ...and from whichever virtual link references it.
+            for k, vlink in self.resources[account.name].vlinks.items():
+                for z in vlink.connection_points:
+                    if z.connection_point_id == c.connection_point_id:
+                        vlink.connection_points.remove(z)
+                        break
+        logger.debug('modified vdu: {}'.format(vdu_modify.vdu_id))
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        vdu = self.resources[account.name].vdus.pop(vdu_id)
+        for c in vdu.connection_points:
+            vlink = self.resources[account.name].vlinks[c.virtual_link_id]
+            z = [p for p in vlink.connection_points if p.connection_point_id == c.connection_point_id]
+            assert len(z) == 1
+            vlink.connection_points.remove(z[0])
+
+        logger.debug('deleted vdu: {}'.format(vdu_id))
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        vdu = self.resources[account.name].vdus[vdu_id]
+        logger.debug('Returning vdu-info for : {}'.format(vdu_id))
+        return vdu.copy()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_vdu_list(self, account):
+        vnf_resources = RwcalYang.VNFResources()
+        for r in self.resources[account.name].vdus.values():
+            vdu = RwcalYang.VDUInfoParams()
+            vdu.copy_from(r)
+            vnf_resources.vdu_info_list.append(vdu)
+        logger.debug("Returning list of vdu-info of size: %d", len(vnf_resources.vdu_info_list))
+        return vnf_resources
+
diff --git a/rwcal/plugins/vala/rwcal_openmano/CMakeLists.txt b/rwcal/plugins/vala/rwcal_openmano/CMakeLists.txt
new file mode 100644
index 0000000..3218907
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openmano/CMakeLists.txt
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+# Register and install the openmano stub CAL plugin via the RIFT plugin
+# helper provided by rift_plugin.
+rift_install_python_plugin(rwcal_openmano rwcal_openmano.py)
diff --git a/rwcal/plugins/vala/rwcal_openmano/Makefile b/rwcal/plugins/vala/rwcal_openmano/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openmano/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+# (the quotes and leading space in the call argument are consumed by the
+# shell invocation inside find_upward, so find receives the bare filename)
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_openmano/rwcal_openmano.py b/rwcal/plugins/vala/rwcal_openmano/rwcal_openmano.py
new file mode 100644
index 0000000..1503d64
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openmano/rwcal_openmano.py
@@ -0,0 +1,254 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.openmano')
+
+class UnknownAccountError(Exception):
+    """Error type for operations referencing an unknown cloud account."""
+    pass
+
+
+class MissingFileError(Exception):
+    """Error type for operations that depend on a file that is absent."""
+    pass
+
+
+class ImageLocationError(Exception):
+    """Error type for an invalid or unreachable image location."""
+    pass
+
+
+# Decorator that converts the mapped exception types into RwStatus codes.
+# NOTE(review): ImageLocationError is defined above but not mapped here, so
+# it falls through to the decorator's generic failure handling — confirm
+# that is intentional.
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class RwcalOpenmanoPlugin(GObject.Object, RwCal.Cloud):
+    """Stub implementation the CAL VALA methods for Openmano. """
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="openmano",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If creds are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus(
+                status="success",
+                details=""
+                )
+        print("Returning status: %s", str(status))
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        logger.warning("Creating image on openmano not supported")
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        return RwcalYang.VimResources()
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        return RwcalYang.VimResources()
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        return RwcalYang.VimResources()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_virtual_link_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_vdu_list(self, account):
+        raise NotImplementedError()
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/CMakeLists.txt b/rwcal/plugins/vala/rwcal_openmano_vimconnector/CMakeLists.txt
new file mode 100644
index 0000000..8938f0a
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openmano_vimconnector/CMakeLists.txt
@@ -0,0 +1,35 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+### rwcal-openmano-vimconnector package
+set(PKG_NAME rwcal-openmano-vimconnector)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+rift_install_python_plugin(rwcal_openmano_vimconnector rwcal_openmano_vimconnector.py)
+
+# Install the vendored openmano vim-connector modules (see the README next
+# to them) as a python3-only component of this package.
+rift_python_install_tree(
+  FILES
+    rift/rwcal/openmano_vimconnector/__init__.py
+    rift/rwcal/openmano_vimconnector/vimconn.py
+    rift/rwcal/openmano_vimconnector/vimconn_openvim.py
+    rift/rwcal/openmano_vimconnector/openmano_schemas.py
+  PYTHON3_ONLY
+  COMPONENT ${PKG_LONG_NAME})
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/Makefile b/rwcal/plugins/vala/rwcal_openmano_vimconnector/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openmano_vimconnector/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+# (the quotes and leading space in the call argument are consumed by the
+# shell invocation inside find_upward, so find receives the bare filename)
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/README b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/README
new file mode 100644
index 0000000..d235ad5
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/README
@@ -0,0 +1,4 @@
+Below are three files taken from the following commit of the Openmano git repository:
+https://raw.githubusercontent.com/nfvlabs/openmano/71ffb2c9be4639ce2ec6179d45a2690cf6589c95/openmano/vimconn.py
+https://raw.githubusercontent.com/nfvlabs/openmano/71ffb2c9be4639ce2ec6179d45a2690cf6589c95/openmano/vimconn_openvim.py
+https://raw.githubusercontent.com/nfvlabs/openmano/71ffb2c9be4639ce2ec6179d45a2690cf6589c95/openmano/openmano_schemas.py
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/__init__.py b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/__init__.py
new file mode 100644
index 0000000..e405796
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/__init__.py
@@ -0,0 +1 @@
+from .vimconn_openvim import vimconnector
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/openmano_schemas.py b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/openmano_schemas.py
new file mode 100644
index 0000000..1f1bbe7
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/openmano_schemas.py
@@ -0,0 +1,752 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+JSON schemas used by openmano httpserver.py module to parse the different files and messages sent through the API 
+'''
+__author__="Alfonso Tierno, Gerardo Garcia"
+__date__ ="$09-oct-2014 09:09:48$"
+
+#Basis schemas
+# Reusable leaf schemas shared by the composite schemas below (vendored
+# from upstream openmano; see the README next to this file).
+# NOTE(review): "patern_name" is misspelled upstream; keep the name as-is so
+# any later references in this vendored file still resolve.
+patern_name="^[ -~]+$"
+passwd_schema={"type" : "string", "minLength":1, "maxLength":60}
+nameshort_schema={"type" : "string", "minLength":1, "maxLength":60, "pattern" : "^[^,;()'\"]+$"}
+name_schema={"type" : "string", "minLength":1, "maxLength":255, "pattern" : "^[^,;()'\"]+$"}
+xml_text_schema={"type" : "string", "minLength":1, "maxLength":1000, "pattern" : "^[^']+$"}
+description_schema={"type" : ["string","null"], "maxLength":255, "pattern" : "^[^'\"]+$"}
+id_schema_fake = {"type" : "string", "minLength":2, "maxLength":36 }  #"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
+id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+pci_schema={"type":"string", "pattern":"^[0-9a-fA-F]{4}(:[0-9a-fA-F]{2}){2}\.[0-9a-fA-F]$"}
+http_schema={"type":"string", "pattern":"^https?://[^'\"=]+$"}
+bandwidth_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]bps)?$"}
+memory_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]i?[Bb])?$"}
+integer0_schema={"type":"integer","minimum":0}
+integer1_schema={"type":"integer","minimum":1}
+path_schema={"type":"string", "pattern":"^(\.){0,2}(/[^/\"':{}\(\)]+)+$"}
+vlan_schema={"type":"integer","minimum":1,"maximum":4095}
+vlan1000_schema={"type":"integer","minimum":1000,"maximum":4095}
+mac_schema={"type":"string", "pattern":"^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"}  #must be unicast LSB bit of MSB byte ==0 
+#mac_schema={"type":"string", "pattern":"^([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$"}
+# NOTE(review): the dots in this IPv4 pattern are unescaped, so "." matches
+# any character — confirm with upstream before tightening the regex.
+ip_schema={"type":"string","pattern":"^([0-9]{1,3}.){3}[0-9]{1,3}$"}
+port_schema={"type":"integer","minimum":1,"maximum":65534}
+object_schema={"type":"object"}
+schema_version_2={"type":"integer","minimum":2,"maximum":2}
+log_level_schema={"type":"string", "enum":["DEBUG", "INFO", "WARNING","ERROR","CRITICAL"]}
+
+# Free-form VM image metadata accepted alongside image references.
+metadata_schema={
+    "type":"object",
+    "properties":{
+        "architecture": {"type":"string"},
+        "use_incremental": {"type":"string","enum":["yes","no"]},
+        "vpci": pci_schema,
+        "os_distro": {"type":"string"},
+        "os_type": {"type":"string"},
+        "os_version": {"type":"string"},
+        "bus": {"type":"string"},
+        "topology": {"type":"string", "enum": ["oneSocket"]}
+    }
+}
+
+#Schema for the configuration file
+config_schema = {
+    "title":"configuration response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "http_port": port_schema,
+        "http_admin_port": port_schema,
+        "http_host": nameshort_schema,
+        "vnf_repository": path_schema,
+        "db_host": nameshort_schema,
+        "db_user": nameshort_schema,
+        "db_passwd": {"type":"string"},
+        "db_name": nameshort_schema,
+        # Next fields will disappear once the MANO API includes appropriate primitives
+        "vim_url": http_schema,
+        "vim_url_admin": http_schema,
+        "vim_name": nameshort_schema,
+        "vim_tenant_name": nameshort_schema,
+        "mano_tenant_name": nameshort_schema,
+        "mano_tenant_id": id_schema, 
+        "http_console_ports": {
+            "type": "array", 
+            # NOTE(review): draft-04 spells this keyword "oneOf"; validators
+            # ignore unknown keywords, so this alternative is effectively
+            # unchecked — confirm against upstream openmano before changing.
+            "items": {"OneOf" : [
+                port_schema, 
+                {"type":"object", "properties":{"from": port_schema, "to": port_schema}, "required": ["from","to"]} 
+            ]}
+        },
+        "log_level": log_level_schema,
+        "log_level_db": log_level_schema,
+        "log_level_vimconn": log_level_schema
+    },
+    "required": ['db_host', 'db_user', 'db_passwd', 'db_name'],
+    "additionalProperties": False
+}
+
+# Schemas for the tenant create/edit API bodies.
+tenant_schema = {
+    "title":"tenant information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "tenant":{
+            "type":"object",
+            "properties":{
+                "name": nameshort_schema,
+                "description": description_schema,
+            },
+            "required": ["name"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["tenant"],
+    "additionalProperties": False
+}
+tenant_edit_schema = {
+    "title":"tenant edit information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "tenant":{
+            "type":"object",
+            "properties":{
+                "name": name_schema,
+                "description": description_schema,
+            },
+            "additionalProperties": False
+        }
+    },
+    "required": ["tenant"],
+    "additionalProperties": False
+}
+
+# Shared "datacenter" property set, reused by the create and edit schemas.
+datacenter_schema_properties={
+                "name": name_schema,
+                "description": description_schema,
+                "type": nameshort_schema, #currently "openvim" or "openstack", can be enlarge with plugins
+                "vim_url": description_schema,
+                "vim_url_admin": description_schema,
+                "config": { "type":"object" }
+            }
+
+datacenter_schema = {
+    "title":"datacenter information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "datacenter":{
+            "type":"object",
+            "properties":datacenter_schema_properties,
+            "required": ["name", "vim_url"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["datacenter"],
+    "additionalProperties": False
+}
+
+
+datacenter_edit_schema = {
+    "title":"datacenter edit nformation schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "datacenter":{
+            "type":"object",
+            "properties":datacenter_schema_properties,
+            "additionalProperties": False
+        }
+    },
+    "required": ["datacenter"],
+    "additionalProperties": False
+}
+
+
+# Schemas for mapping datacenter networks (netmaps).
+netmap_new_schema = {
+    "title":"netmap new information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "netmap":{   #delete from datacenter
+            "type":"object",
+            "properties":{
+                "name": name_schema,  #name or uuid of net to change
+                "vim_id": id_schema,
+                "vim_name": name_schema
+            },
+            "minProperties": 1,
+            "additionalProperties": False
+        },
+    },
+    "required": ["netmap"],
+    "additionalProperties": False
+}
+
+netmap_edit_schema = {
+    "title":"netmap edit information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "netmap":{   #delete from datacenter
+            "type":"object",
+            "properties":{
+                "name": name_schema,  #name or uuid of net to change
+            },
+            "minProperties": 1,
+            "additionalProperties": False
+        },
+    },
+    "required": ["netmap"],
+    "additionalProperties": False
+}
+
+# Exactly one action key may be present per request (min/maxProperties 1).
+datacenter_action_schema = {
+    "title":"datacenter action information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "net-update":{"type":"null",},
+        "net-edit":{
+            "type":"object",
+            "properties":{
+                "net": name_schema,  #name or uuid of net to change
+                "name": name_schema,
+                "description": description_schema,
+                "shared": {"type": "boolean"}
+            },
+            "minProperties": 1,
+            "additionalProperties": False
+        },
+        "net-delete":{
+            "type":"object",
+            "properties":{
+                "net": name_schema,  #name or uuid of net to change
+            },
+            "required": ["net"],
+            "additionalProperties": False
+        },
+    },
+    "minProperties": 1,
+    "maxProperties": 1,
+    "additionalProperties": False
+}
+
+
+# Body for attaching a datacenter to a tenant with VIM credentials.
+datacenter_associate_schema={
+    "title":"datacenter associate information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "datacenter":{
+            "type":"object",
+            "properties":{
+                "vim_tenant": id_schema,
+                "vim_tenant_name": nameshort_schema,
+                "vim_username": nameshort_schema,
+                "vim_password": nameshort_schema,
+            },
+#            "required": ["vim_tenant"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["datacenter"],
+    "additionalProperties": False
+}
+
+internal_connection_element_schema = {
+    "type":"object",
+    "properties":{
+        "VNFC": name_schema,
+        "local_iface_name": name_schema
+    }
+}
+
+internal_connection_schema = {
+    "type":"object",
+    "properties":{
+        "name": name_schema,
+        "description":description_schema,
+        "type":{"type":"string", "enum":["bridge","data","ptp"]},
+        "elements": {"type" : "array", "items": internal_connection_element_schema, "minItems":2}
+    },
+    "required": ["name", "type", "elements"],
+    "additionalProperties": False
+}
+
+external_connection_schema = {
+    "type":"object",
+    "properties":{
+        "name": name_schema,
+        "type":{"type":"string", "enum":["mgmt","bridge","data"]},
+        "VNFC": name_schema,
+        "local_iface_name": name_schema ,
+        "description":description_schema
+    },
+    "required": ["name", "type", "VNFC", "local_iface_name"],
+    "additionalProperties": False
+}
+
+# Array of dataplane (passthrough/SR-IOV) interfaces attached to a NUMA node.
+# "dedicated" selects PT ("yes"), SR-IOV ("no") or single-SRIOV-per-NIC
+# ("yes:sriov"); name, dedicated and bandwidth are all mandatory.
+interfaces_schema={
+    "type":"array",
+    "items":{
+        "type":"object",
+        "properties":{
+            "name":name_schema,
+            "dedicated":{"type":"string","enum":["yes","no","yes:sriov"]},
+            "bandwidth":bandwidth_schema,
+            "vpci":pci_schema,
+            "mac_address": mac_schema
+        },
+        "additionalProperties": False,
+        "required": ["name","dedicated", "bandwidth"]
+    }
+}
+
+# Array of virtual (bridge) interfaces for a VNFC. Only "name" is required;
+# "model" restricts the emulated NIC type.
+bridge_interfaces_schema={
+    "type":"array",
+    "items":{
+        "type":"object",
+        "properties":{
+            "name": name_schema,
+            "bandwidth":bandwidth_schema,
+            "vpci":pci_schema,
+            "mac_address": mac_schema,
+            "model": {"type":"string", "enum":["virtio","e1000","ne2k_pci","pcnet","rtl8139"]}
+        },
+        "additionalProperties": False,
+        "required": ["name"]
+    }
+}
+
+# Array of extra devices attached to a VNFC: additional disks, cdroms, or a
+# raw libvirt-style xml fragment ("xml" type). Only "type" is required.
+devices_schema={
+    "type":"array",
+    "items":{
+        "type":"object",
+        "properties":{
+            "type":{"type":"string", "enum":["disk","cdrom","xml"] },
+            "image": path_schema,
+            "image metadata": metadata_schema, 
+            "vpci":pci_schema,
+            "xml":xml_text_schema,
+        },
+        "additionalProperties": False,
+        "required": ["type"]
+    }
+}
+
+
+# EPA resource request for one NUMA node: memory (1G huge pages), CPU
+# allocation expressed as cores, paired hyperthreads or individual threads
+# (optionally pinned via the *-id arrays), and dataplane interfaces.
+numa_schema = {
+    "type": "object",
+    "properties": {
+        "memory":integer1_schema,
+        "cores":integer1_schema,
+        "paired-threads":integer1_schema,
+        "threads":integer1_schema,
+        "cores-id":{"type":"array","items":integer0_schema},
+        # each entry is a [thread, thread] pair, hence min/maxItems of 2
+        "paired-threads-id":{"type":"array","items":{"type":"array","minItems":2,"maxItems":2,"items":integer0_schema}},
+        "threads-id":{"type":"array","items":integer0_schema},
+        "interfaces":interfaces_schema
+    },
+    "additionalProperties": False,
+    #"required": ["memory"]
+}
+
+# One VNF component (a VM): image, compute sizing (either cloud-style
+# ram/vcpus or EPA-style "numas"), virtual interfaces and extra devices.
+vnfc_schema = {
+    "type":"object",
+    "properties":{
+        "name": name_schema,
+        "description": description_schema,
+        # image may be given as a local path or as an http(s) URL
+        "VNFC image": {"oneOf": [path_schema, http_schema]},
+        "image metadata": metadata_schema, 
+        "processor": {
+            "type":"object",
+            "properties":{
+                "model":description_schema,
+                "features":{"type":"array","items":nameshort_schema}
+            },
+            "required": ["model"],
+            "additionalProperties": False
+        },
+        "hypervisor": {
+            "type":"object",
+            "properties":{
+                "type":nameshort_schema,
+                "version":description_schema
+            },
+        },
+        "ram":integer0_schema,
+        "vcpus":integer0_schema,
+        "disk": integer1_schema,
+        "numas": {
+            "type": "array",
+            "items":numa_schema
+        },
+        "bridge-ifaces": bridge_interfaces_schema,
+        "devices": devices_schema
+    },
+    "required": ["name", "VNFC image"],
+    "additionalProperties": False
+}
+
+# Top-level VNF descriptor, schema version 0.1. Wraps everything under a
+# single "vnf" key; extra keys inside "vnf" are tolerated
+# ("additionalProperties": True) for forward compatibility.
+vnfd_schema_v01 = {
+    "title":"vnfd information schema v0.1",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "vnf":{
+            "type":"object",
+            "properties":{
+                "name": name_schema,
+                "description": description_schema,
+                "class": nameshort_schema,
+                "public": {"type" : "boolean"},
+                "physical": {"type" : "boolean"},
+                "tenant_id": id_schema, #only valid for admin
+                "external-connections": {"type" : "array", "items": external_connection_schema, "minItems":1},
+                "internal-connections": {"type" : "array", "items": internal_connection_schema, "minItems":1},
+                "VNFC":{"type" : "array", "items": vnfc_schema, "minItems":1}
+            },
+            "required": ["name","external-connections"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["vnf"],
+    "additionalProperties": False
+}
+
+#Future VNFD schema to be defined
+vnfd_schema_v02 = {
+    "title":"vnfd information schema v0.2",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "schema_version": schema_version_2,
+        "vnf":{
+            "type":"object",
+            "properties":{
+                "name": name_schema,
+            },
+            "required": ["name"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["vnf", "schema_version"],
+    "additionalProperties": False
+}
+
+#vnfd_schema = vnfd_schema_v01
+#{
+#    "title":"vnfd information schema v0.2",
+#    "$schema": "http://json-schema.org/draft-04/schema#",
+#    "oneOf": [vnfd_schema_v01, vnfd_schema_v02]
+#}
+
+# GUI placement information for a scenario node: x/y canvas coordinates and
+# which side (left/right/bottom) each interface is drawn on.
+graph_schema = {
+    "title":"graphical scenario descriptor information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "x":      integer0_schema,
+        "y":      integer0_schema,
+        "ifaces": {
+            "type":"object",
+            "properties":{
+                "left": {"type":"array"},
+                "right": {"type":"array"},
+                "bottom": {"type":"array"},
+            }
+        }
+    },
+    "required": ["x","y"]
+}
+
+nsd_schema_v01 = {
+    "title":"network scenario descriptor information schema v0.1",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "name":name_schema,
+        "description": description_schema,
+        "tenant_id": id_schema, #only valid for admin
+        "public": {"type": "boolean"},
+        "topology":{
+            "type":"object",
+            "properties":{
+                "nodes": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "type":{"type":"string", "enum":["VNF", "other_network", "network", "external_network"]},
+                                "vnf_id": id_schema,
+                                "graph": graph_schema,
+                            },
+                            "patternProperties":{
+                                "^(VNF )?model$": {"type": "string"}
+                            },
+                            "required": ["type"]
+                        }
+                    }
+                },
+                "connections": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "nodes":{"oneOf":[{"type":"object", "minProperties":2}, {"type":"array", "minLength":1}]},
+                                "type": {"type": "string", "enum":["link", "external_network", "dataplane_net", "bridge_net"]},
+                                "graph": graph_schema
+                            },
+                            "required": ["nodes"]
+                        },
+                    }
+                }
+            },
+            "required": ["nodes"],
+            "additionalProperties": False
+        }
+    },
+    "required": ["name","topology"],
+    "additionalProperties": False
+}
+
+#Future NSD schema to be defined
+nsd_schema_v02 = {
+    "title":"network scenario descriptor information schema v0.2",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "schema_version": schema_version_2,
+        "scenario":{
+            "type":"object",
+            "properties":{
+                "name":name_schema,
+                "description": description_schema,
+                "tenant_id": id_schema, #only valid for admin
+                "public": {"type": "boolean"},
+                "vnfs": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "vnf_id": id_schema,
+                                "graph": graph_schema,
+                                "vnf_name": name_schema,
+                            },
+                        }
+                    },
+                    "minProperties": 1
+                },
+                "networks": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "interfaces":{"type":"array", "minLength":1},
+                                "type": {"type": "string", "enum":["dataplane", "bridge"]},
+                                "external" : {"type": "boolean"},
+                                "graph": graph_schema
+                            },
+                            "required": ["interfaces"]
+                        },
+                    }
+                },
+            
+            },
+            "required": ["vnfs", "networks","name"],
+            "additionalProperties": False
+        }
+    },
+    "required": ["scenario","schema_version"],
+    "additionalProperties": False
+}
+
+#scenario_new_schema = {
+#    "title":"new scenario information schema",
+#    "$schema": "http://json-schema.org/draft-04/schema#",
+#    #"oneOf": [nsd_schema_v01, nsd_schema_v02]
+#    "oneOf": [nsd_schema_v01]
+#}
+
+# Validates an edit of an existing scenario: rename/redescribe it and move
+# nodes on the GUI canvas. Node keys must be UUIDs (enforced by the
+# patternProperties regex below).
+scenario_edit_schema = {
+    "title":"edit scenario information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "name":name_schema,
+        "description": description_schema,
+        "topology":{
+            "type":"object",
+            "properties":{
+                "nodes": {
+                    "type":"object",
+                    # keys are node UUIDs in canonical 8-4-4-4-12 hex form
+                    "patternProperties":{
+                        "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$": {
+                            "type":"object",
+                            "properties":{
+                                "graph":{
+                                    "type": "object",
+                                    "properties":{
+                                        "x": integer0_schema,
+                                        "y": integer0_schema,
+                                        "ifaces":{ "type": "object"}
+                                    }
+                                },
+                                "description": description_schema,
+                                "name": name_schema
+                            }
+                        }
+                    }
+                }
+            },
+            "required": ["nodes"],
+            "additionalProperties": False
+        }
+    },
+    "additionalProperties": False
+}
+
+# Validates an action request over a scenario. Exactly ONE of the four
+# actions (start/deploy/reserve/verify) must be present — enforced by
+# minProperties/maxProperties of 1 — and each carries the same payload:
+# a mandatory instance_name plus optional description and datacenter.
+scenario_action_schema = {
+    "title":"scenario action information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "start":{
+            "type": "object",
+            "properties": {
+                "instance_name":name_schema,
+                "description":description_schema,
+                "datacenter": {"type": "string"}
+            },
+            "required": ["instance_name"]
+        },
+        "deploy":{
+            "type": "object",
+            "properties": {
+                "instance_name":name_schema,
+                "description":description_schema,
+                "datacenter": {"type": "string"}
+            },
+            "required": ["instance_name"]
+        },
+        "reserve":{
+            "type": "object",
+            "properties": {
+                "instance_name":name_schema,
+                "description":description_schema,
+                "datacenter": {"type": "string"}
+            },
+            "required": ["instance_name"]
+        },
+        "verify":{
+            "type": "object",
+            "properties": {
+                "instance_name":name_schema,
+                "description":description_schema,
+                "datacenter": {"type": "string"}
+            },
+            "required": ["instance_name"]
+        }
+    },
+    "minProperties": 1,
+    "maxProperties": 1,
+    "additionalProperties": False
+}
+
+instance_scenario_create_schema = {
+    "title":"instance scenario create information schema v0.1",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "schema_version": {"type": "string", "enum": ["0.1"]},
+        "instance":{
+            "type":"object",
+            "properties":{
+                "name":name_schema,
+                "description":description_schema,
+                "datacenter": name_schema,
+                "scenario" : name_schema, #can be an UUID or name
+                "action":{"enum": ["deploy","reserve","verify" ]},
+                "connect_mgmt_interfaces": {"oneOff": [{"type":"boolean"}, {"type":"object"}]},# can be true or a dict with datacenter: net_name
+                "vnfs":{             #mapping from scenario to datacenter
+                    "type": "object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "name":   name_schema,#override vnf name
+                                "datacenter": name_schema,
+                                "metadata": {"type": "object"},
+                                "user_data": {"type": "string"}
+                            }
+                        }
+                    },
+                },
+                "networks":{             #mapping from scenario to datacenter
+                    "type": "object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "netmap-create": {"oneOf":[name_schema,{"type": "null"}]}, #datacenter network to use. Null if must be created as an internal net
+                                "netmap-use": name_schema,
+                                "name":   name_schema,#override network name
+                                "datacenter": name_schema,
+                            }
+                        }
+                    },
+                },
+            },
+            "additionalProperties": False,
+            "required": ["scenario", "name"]
+        },
+    },
+    "required": ["instance"],
+    "additionalProperties": False
+    
+}
+
+# Validates a lifecycle action on a running scenario instance. Action keys
+# take null as "no arguments"; "vnfs"/"vms" optionally restrict the action
+# to a subset of elements. At least one property is required; note that
+# maxProperties is commented out, so an action MAY be combined with
+# vnfs/vms filters in one request.
+instance_scenario_action_schema = {
+    "title":"instance scenario action information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "start":{"type": "null"},
+        "pause":{"type": "null"},
+        "resume":{"type": "null"},
+        "shutoff":{"type": "null"},
+        "shutdown":{"type": "null"},
+        "forceOff":{"type": "null"},
+        "rebuild":{"type": "null"},
+        "reboot":{
+            "type": ["object","null"],
+        },
+        # None in the enum permits an explicit null console type
+        "console": {"type": ["string", "null"], "enum": ["novnc", "xvpvnc", "rdp-html5", "spice-html5", None]},
+        "vnfs":{"type": "array", "items":{"type":"string"}},
+        "vms":{"type": "array", "items":{"type":"string"}}
+    },
+    "minProperties": 1,
+    #"maxProperties": 1,
+    "additionalProperties": False
+}
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/vimconn.py b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/vimconn.py
new file mode 100644
index 0000000..3608853
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/vimconn.py
@@ -0,0 +1,391 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+vimconn implement an Abstract class for the vim connector plugins
+ with the definition of the method to be implemented.
+'''
+__author__="Alfonso Tierno"
+__date__ ="$16-oct-2015 11:09:29$"
+
+import logging
+
+#Error variables 
+# HTTP status codes used as the "http_code" carried by the vimconn
+# exception hierarchy below, so REST layers can map errors directly.
+HTTP_Bad_Request = 400
+HTTP_Unauthorized = 401 
+HTTP_Not_Found = 404 
+HTTP_Method_Not_Allowed = 405 
+HTTP_Request_Timeout = 408
+HTTP_Conflict = 409
+HTTP_Not_Implemented = 501
+HTTP_Service_Unavailable = 503 
+HTTP_Internal_Server_Error = 500 
+
+class vimconnException(Exception):
+    '''Common and base class Exception for all vimconnector exceptions.
+
+    Carries an HTTP status code (default HTTP_Bad_Request) alongside the
+    message so callers can surface VIM errors through a REST API.
+    '''
+    def __init__(self, message, http_code=HTTP_Bad_Request):
+        Exception.__init__(self, message)
+        self.http_code = http_code
+
+class vimconnConnectionException(vimconnException):
+    '''Connectivity error with the VIM (maps to HTTP 503 by default).'''
+    def __init__(self, message, http_code=HTTP_Service_Unavailable):
+        vimconnException.__init__(self, message, http_code)
+    
+class vimconnUnexpectedResponse(vimconnException):
+    '''Got a malformed or unexpected response from the VIM (HTTP 503 by default).'''
+    def __init__(self, message, http_code=HTTP_Service_Unavailable):
+        vimconnException.__init__(self, message, http_code)
+
+class vimconnAuthException(vimconnException):
+    '''Invalid credentials or authorization to perform this action over the VIM (HTTP 401 by default).'''
+    def __init__(self, message, http_code=HTTP_Unauthorized):
+        vimconnException.__init__(self, message, http_code)
+
+class vimconnNotFoundException(vimconnException):
+    '''The requested item was not found at the VIM (HTTP 404 by default).'''
+    def __init__(self, message, http_code=HTTP_Not_Found):
+        vimconnException.__init__(self, message, http_code)
+
+class vimconnConflictException(vimconnException):
+    '''There is a conflict, e.g. more than one matching item found (HTTP 409 by default).'''
+    def __init__(self, message, http_code=HTTP_Conflict):
+        vimconnException.__init__(self, message, http_code)
+
+class vimconnNotImplemented(vimconnException):
+    '''The method is not implemented by the connector plugin (HTTP 501 by default).'''
+    def __init__(self, message, http_code=HTTP_Not_Implemented):
+        vimconnException.__init__(self, message, http_code)
+
+class vimconnector():
+    '''Abstract base class for all the VIM connector plugins
+    These plugins must implement a vimconnector class derived from this 
+    and all these methods
+    ''' 
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level="ERROR", config={}):
+        self.id        = uuid
+        self.name      = name
+        self.url       = url
+        self.url_admin = url_admin
+        self.tenant_id = tenant_id
+        self.tenant_name = tenant_name
+        self.user      = user
+        self.passwd    = passwd
+        self.config    = config
+        self.logger = logging.getLogger('mano.vim')
+        self.logger.setLevel( getattr(logging, log_level) )
+        if not self.url_admin:  #try to use normal url 
+            self.url_admin = self.url
+    
+    def __getitem__(self,index):
+        if index=='tenant_id':
+            return self.tenant_id
+        if index=='tenant_name':
+            return self.tenant_name
+        elif index=='id':
+            return self.id
+        elif index=='name':
+            return self.name
+        elif index=='user':
+            return self.user
+        elif index=='passwd':
+            return self.passwd
+        elif index=='url':
+            return self.url
+        elif index=='url_admin':
+            return self.url_admin
+        elif index=="config":
+            return self.config
+        else:
+            raise KeyError("Invalid key '%s'" %str(index))
+        
+    def __setitem__(self,index, value):
+        if index=='tenant_id':
+            self.tenant_id = value
+        if index=='tenant_name':
+            self.tenant_name = value
+        elif index=='id':
+            self.id = value
+        elif index=='name':
+            self.name = value
+        elif index=='user':
+            self.user = value
+        elif index=='passwd':
+            self.passwd = value
+        elif index=='url':
+            self.url = value
+        elif index=='url_admin':
+            self.url_admin = value
+        else:
+            raise KeyError("Invalid key '%s'" %str(index))
+        
+    def new_tenant(self,tenant_name,tenant_description):
+        '''Adds a new tenant to VIM with this name and description,
+        returns the tenant identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def delete_tenant(self,tenant_id,):
+        '''Delete a tenant from VIM'''
+        '''Returns the tenant identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_tenant_list(self, filter_dict={}):
+        '''Obtain tenants of VIM
+        filter_dict can contain the following keys:
+            name: filter by tenant name
+            id: filter by tenant uuid/id
+            <other VIM specific>
+        Returns the tenant list of dictionaries: 
+            [{'name':'<name>, 'id':'<id>, ...}, ...]
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def new_network(self,net_name, net_type, shared=False):
+        '''Adds a tenant network to VIM
+            net_type can be 'bridge','data'.'ptp'.  TODO: this need to be revised 
+            shared is a boolean
+        Returns the network identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_network_list(self, filter_dict={}):
+        '''Obtain tenant networks of VIM
+        Filter_dict can be:
+            name: network name
+            id: network uuid
+            shared: boolean
+            tenant_id: tenant
+            admin_state_up: boolean
+            status: 'ACTIVE'
+        Returns the network list of dictionaries:
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]
+            List can be empty
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_network(self, net_id):
+        '''Obtain network details of net_id VIM network'
+           Return a dict with  the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def delete_network(self, net_id):
+        '''Deletes a tenant network from VIM, provide the network id.
+        Returns the network identifier or raise an exception'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def refresh_nets_status(self, net_list):
+        '''Get the status of the networks
+           Params: the list of network identifiers
+           Returns a dictionary with:
+                net_id:         #VIM id of this network
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, INACTIVE, DOWN (admin down), 
+                                #  BUILD (on building process)
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_flavor(self, flavor_id):
+        '''Obtain flavor details from the  VIM
+            Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+        
+    def new_flavor(self, flavor_data):
+        '''Adds a tenant flavor to VIM
+            flavor_data contains a dictionary with information, keys:
+                name: flavor name
+                ram: memory (cloud type) in MBytes
+                vpcus: cpus (cloud type)
+                extended: EPA parameters
+                  - numas: #items requested in same NUMA
+                        memory: number of 1G huge pages memory
+                        paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
+                        interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+                          - name: interface name
+                            dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
+                            bandwidth: X Gbps; requested guarantee bandwidth
+                            vpci: requested virtual PCI address   
+                disk: disk size
+                is_public:
+                       
+                
+                    
+                 #TODO to concrete
+        Returns the flavor identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def delete_flavor(self, flavor_id):
+        '''Deletes a tenant flavor from VIM identify by its id
+        Returns the used id or raise an exception'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def new_image(self,image_dict):
+        '''
+        Adds a tenant image to VIM
+        Returns:
+            200, image-id        if the image is created
+            <0, message          if there is an error
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def delete_image(self, image_id):
+        '''Deletes a tenant image from VIM'''
+        '''Returns the HTTP response code and a message indicating details of the success or fail'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_image_id_from_path(self, path):
+        '''Get the image id from image path in the VIM database'''
+        '''Returns:
+             0,"Image not found"   if there are no images with that path
+             1,image-id            if there is one image with that path
+             <0,message            if there was an error (Image not found, error contacting VIM, more than 1 image with that path, etc.) 
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+        
+    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list):
+        '''Adds a VM instance to VIM
+        Params:
+            start: indicates if VM must start or boot in pause mode. Ignored
+            image_id,flavor_id: image and flavor uuid
+            net_list: list of interfaces, each one is a dictionary with:
+                name:
+                net_id: network uuid to connect
+                vpci: virtual vcpi to assign
+                model: interface model, virtio, e2000, ...
+                mac_address: 
+                use: 'data', 'bridge',  'mgmt'
+                type: 'virtual', 'PF', 'VF', 'VFnotShared'
+                vim_id: filled/added by this function
+                #TODO ip, security groups
+        Returns >=0, the instance identifier
+                <0, error_text
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+        
+    def get_vminstance(self,vm_id):
+        '''Returns the VM instance information from VIM'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+        
+    def delete_vminstance(self, vm_id):
+        '''Removes a VM instance from VIM'''
+        '''Returns the instance identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def refresh_vms_status(self, vm_list):
+        '''Get the status of the virtual machines and their interfaces/ports
+           Params: the list of VM identifiers
+           Returns a dictionary with:
+                vm_id:          #VIM id of this Virtual Machine
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), 
+                                #  CREATING (on building process), ERROR
+                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                    interfaces:
+                     -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                        vim_net_id:       #network id where this interface is connected
+                        vim_interface_id: #interface/port VIM id
+                        ip_address:       #null, or text with IPv4, IPv6 address
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+    
+    def action_vminstance(self, vm_id, action_dict):
+        '''Send and action over a VM instance from VIM
+        Returns the vm_id if the action was successfully sent to the VIM'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+    
+    def get_vminstance_console(self,vm_id, console_type="vnc"):
+        '''
+        Get a console for the virtual machine
+        Params:
+            vm_id: uuid of the VM
+            console_type, can be:
+                "novnc" (by default), "xvpvnc" for VNC types, 
+                "rdp-html5" for RDP types, "spice-html5" for SPICE types
+        Returns dict with the console parameters:
+                protocol: ssh, ftp, http, https, ...
+                server:   usually ip address 
+                port:     the http, ssh, ... port 
+                suffix:   extra text, e.g. the http path and query string   
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+        
+#NOT USED METHODS in current version        
+
+    def host_vim2gui(self, host, server_dict):
+        '''Transform host dictionary from VIM format to GUI format,
+        and append to the server_dict
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_hosts_info(self):
+        '''Get the information of deployed hosts
+        Returns the hosts content'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_hosts(self, vim_tenant):
+        '''Get the hosts and deployed instances
+        Returns the hosts content'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_processor_rankings(self):
+        '''Get the processor rankings in the VIM database'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+    
+    def new_host(self, host_data):
+        '''Adds a new host to VIM'''
+        '''Returns status code of the VIM response'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+    
+    def new_external_port(self, port_data):
+        '''Adds a external port to VIM'''
+        '''Returns the port identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+        
+    def new_external_network(self,net_name,net_type):
+        '''Adds a external network to VIM (shared)'''
+        '''Returns the network identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def connect_port_network(self, port_id, network_id, admin=False):
+        '''Connects a external port to a network'''
+        '''Returns status code of the VIM response'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def new_vminstancefromJSON(self, vm_data):
+        '''Adds a VM instance to VIM'''
+        '''Returns the instance identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/vimconn_openvim.py b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/vimconn_openvim.py
new file mode 100644
index 0000000..6286b6a
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/vimconn_openvim.py
@@ -0,0 +1,1372 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+vimconnector implements all the methods to interact with openvim using the openvim API.
+'''
+__author__="Alfonso Tierno, Gerardo Garcia"
+__date__ ="$26-aug-2014 11:09:29$"
+
+from . import vimconn
+import requests
+import json
+import yaml
+import logging
+from .openmano_schemas import id_schema, name_schema, nameshort_schema, description_schema, \
+                            vlan1000_schema, integer0_schema
+from jsonschema import validate as js_v, exceptions as js_e
+
# Map openvim virtual-machine status -> openmano status.
vmStatus2manoFormat = {
    'ACTIVE':    'ACTIVE',
    'PAUSED':    'PAUSED',
    'SUSPENDED': 'SUSPENDED',
    'INACTIVE':  'INACTIVE',
    'CREATING':  'BUILD',
    'ERROR':     'ERROR',
    'DELETED':   'DELETED',
}

# Map openvim network status -> openmano status (identity mapping).
netStatus2manoFormat = {
    'ACTIVE':   'ACTIVE',
    'INACTIVE': 'INACTIVE',
    'BUILD':    'BUILD',
    'ERROR':    'ERROR',
    'DELETED':  'DELETED',
    'DOWN':     'DOWN',
}
+
+
# Minimal entity schemas: openvim items are only checked for id/name here.
host_schema = {
    "type": "object",
    "properties": {
        "id": id_schema,
        "name": name_schema,
    },
    "required": ["id"]
}

image_schema = {
    "type": "object",
    "properties": {
        "id": id_schema,
        "name": name_schema,
    },
    "required": ["id", "name"]
}

flavor_schema = {
    "type": "object",
    "properties": {
        "id": id_schema,
        "name": name_schema,
    },
    "required": ["id", "name"]
}

server_schema = {
    "type": "object",
    "properties": {
        "id": id_schema,
        "name": name_schema,
    },
    "required": ["id", "name"]
}
# ---- wrapper schemas for openvim REST responses ----

new_host_response_schema = {
    "title": "host response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "host": host_schema
    },
    "required": ["host"],
    "additionalProperties": False
}

get_images_response_schema = {
    "title": "openvim images response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "images": {
            "type": "array",
            "items": image_schema,
        }
    },
    "required": ["images"],
    "additionalProperties": False
}

get_flavors_response_schema = {
    "title": "openvim flavors response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "flavors": {
            "type": "array",
            "items": flavor_schema,
        }
    },
    "required": ["flavors"],
    "additionalProperties": False
}

get_hosts_response_schema = {
    "title": "openvim hosts response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "hosts": {
            "type": "array",
            "items": host_schema,
        }
    },
    "required": ["hosts"],
    "additionalProperties": False
}

# Host detail reuses the host wrapper; the detail body is not parsed yet (TODO).
get_host_detail_response_schema = new_host_response_schema

get_server_response_schema = {
    "title": "openvim server response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "servers": {
            "type": "array",
            "items": server_schema,
        }
    },
    "required": ["servers"],
    "additionalProperties": False
}
+
new_tenant_response_schema = {
    "title": "tenant response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "tenant": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "name": nameshort_schema,
                "description": description_schema,
                "enabled": {"type": "boolean"}
            },
            "required": ["id"]
        }
    },
    "required": ["tenant"],
    "additionalProperties": False
}

new_network_response_schema = {
    "title": "network response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "network": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "name": name_schema,
                "type": {"type": "string", "enum": ["bridge_man", "bridge_data", "data", "ptp"]},
                "shared": {"type": "boolean"},
                "tenant_id": id_schema,
                "admin_state_up": {"type": "boolean"},
                "vlan": vlan1000_schema
            },
            "required": ["id"]
        }
    },
    "required": ["network"],
    "additionalProperties": False
}
+
+
+# get_network_response_schema = {
+#     "title":"get network response information schema",
+#     "$schema": "http://json-schema.org/draft-04/schema#",
+#     "type":"object",
+#     "properties":{
+#         "network":{
+#             "type":"object",
+#             "properties":{
+#                 "id":id_schema,
+#                 "name":name_schema,
+#                 "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
+#                 "shared":{"type":"boolean"},
+#                 "tenant_id":id_schema,
+#                 "admin_state_up":{"type":"boolean"},
+#                 "vlan":vlan1000_schema
+#             },
+#             "required": ["id"]
+#         }
+#     },
+#     "required": ["network"],
+#     "additionalProperties": False
+# }
+
+
new_port_response_schema = {
    "title": "port response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "port": {
            "type": "object",
            "properties": {
                "id": id_schema,
            },
            "required": ["id"]
        }
    },
    "required": ["port"],
    "additionalProperties": False
}

get_flavor_response_schema = {
    "title": "openvim flavors response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "flavor": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "name": name_schema,
                # "extended": {"type": "object"},
            },
            "required": ["id", "name"],
        }
    },
    "required": ["flavor"],
    "additionalProperties": False
}

new_flavor_response_schema = {
    "title": "flavor response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "flavor": {
            "type": "object",
            "properties": {
                "id": id_schema,
            },
            "required": ["id"]
        }
    },
    "required": ["flavor"],
    "additionalProperties": False
}

get_image_response_schema = {
    "title": "openvim images response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "image": {
            "type": "object",
            "properties": {
                "id": id_schema,
                "name": name_schema,
            },
            "required": ["id", "name"],
        }
    },
    "required": ["image"],
    "additionalProperties": False
}

new_image_response_schema = {
    "title": "image response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "image": {
            "type": "object",
            "properties": {
                "id": id_schema,
            },
            "required": ["id"]
        }
    },
    "required": ["image"],
    "additionalProperties": False
}

new_vminstance_response_schema = {
    "title": "server response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "server": {
            "type": "object",
            "properties": {
                "id": id_schema,
            },
            "required": ["id"]
        }
    },
    "required": ["server"],
    "additionalProperties": False
}
+
get_processor_rankings_response_schema = {
    "title": "processor rankings information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "rankings": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "model": description_schema,
                    "value": integer0_schema
                },
                "additionalProperties": False,
                "required": ["model", "value"]
            }
        }
    },
    # BUGFIX: "additionalProperties" and "required" were nested inside the
    # top-level "properties" map, where draft-04 treats them as *property
    # names* instead of validation keywords, so neither constraint was ever
    # enforced. They belong at the object level, as below.
    "additionalProperties": False,
    "required": ["rankings"]
}
+
+class vimconnector(vimconn.vimconnector):
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,log_level="DEBUG",config={}):
+        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
+        self.tenant = None
+        self.headers_req = {'content-type': 'application/json'}
+        self.logger = logging.getLogger('mano.vim.openvim')
+        if tenant_id:
+            self.tenant = tenant_id
+
+    def __setitem__(self,index, value):
+        '''Set individuals parameters 
+        Throw TypeError, KeyError
+        '''
+        if index=='tenant_id':
+            self.tenant = value
+        elif index=='tenant_name':
+            self.tenant = None
+        vimconn.vimconnector.__setitem__(self,index, value)    
+
+    def _get_my_tenant(self):
+        '''Obtain uuid of my tenant from name
+        '''
+        if self.tenant:
+            return self.tenant
+
+        url = self.url+'/tenants?name='+ self.tenant_name
+        self.logger.info("Getting VIM tenant_id GET %s", url)
+        vim_response = requests.get(url, headers = self.headers_req)
+        self._check_http_request_response(vim_response)
+        try:
+            tenant_list = vim_response.json()["tenants"]
+            if len(tenant_list) == 0:
+                raise vimconn.vimconnNotFoundException("No tenant found for name '%s'" % str(self.tenant_name))
+            elif len(tenant_list) > 1:
+                raise vimconn.vimconnConflictException ("More that one tenant found for name '%s'" % str(self.tenant_name))
+            self.tenant = tenant_list[0]["id"]
+            return self.tenant
+        except Exception as e:
+            raise vimconn.vimconnUnexpectedResponse("Get VIM tenant {} '{}'".format(type(e).__name__, str(e)))
+
+    def _format_jsonerror(self,http_response):
+        #DEPRECATED, to delete in the future
+        try:
+            data = http_response.json()
+            return data["error"]["description"]
+        except:
+            return http_response.text
+
+    def _format_in(self, http_response, schema):
+        #DEPRECATED, to delete in the future
+        try:
+            client_data = http_response.json()
+            js_v(client_data, schema)
+            #print "Input data: ", str(client_data)
+            return True, client_data
+        except js_e.ValidationError as exc:
+            print("validate_in error, jsonschema exception {} at {}",exc.message, exc.path)
+            return False, ("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
+    
+    def _remove_extra_items(self, data, schema):
+        deleted=[]
+        if type(data) is tuple or type(data) is list:
+            for d in data:
+                a= self._remove_extra_items(d, schema['items'])
+                if a is not None: deleted.append(a)
+        elif type(data) is dict:
+            #for k in data.keys():
+            for k in list(data):
+                if 'properties' not in schema or k not in schema['properties'].keys():
+                    del data[k]
+                    deleted.append(k)
+                else:
+                    a = self._remove_extra_items(data[k], schema['properties'][k])
+                    if a is not None:  deleted.append({k:a})
+        if len(deleted) == 0: return None
+        elif len(deleted) == 1: return deleted[0]
+        else: return deleted
+        
+    def _format_request_exception(self, request_exception):
+        '''Transform a request exception into a vimconn exception'''
+        if isinstance(request_exception, js_e.ValidationError):
+            raise vimconn.vimconnUnexpectedResponse("jsonschema exception '{}' at '{}'".format(request_exception.message, request_exception.path))            
+        elif isinstance(request_exception, requests.exceptions.HTTPError):
+            raise vimconn.vimconnUnexpectedResponse(type(request_exception).__name__ + ": " + str(request_exception))
+        else:
+            raise vimconn.vimconnConnectionException(type(request_exception).__name__ + ": " + str(request_exception))
+
+    def _check_http_request_response(self, request_response):
+        '''Raise a vimconn exception if the response is not Ok'''
+        if request_response.status_code >= 200 and  request_response.status_code < 300:
+            return
+        if request_response.status_code == vimconn.HTTP_Unauthorized:
+            raise vimconn.vimconnAuthException(request_response.text)
+        elif request_response.status_code == vimconn.HTTP_Not_Found:
+            raise vimconn.vimconnNotFoundException(request_response.text)
+        elif request_response.status_code == vimconn.HTTP_Conflict:
+            raise vimconn.vimconnConflictException(request_response.text)
+        else: 
+            raise vimconn.vimconnUnexpectedResponse("VIM HTTP_response {}, {}".format(request_response.status_code, str(request_response.text)))
+
+    def new_tenant(self,tenant_name,tenant_description):
+        '''Adds a new tenant to VIM with this name and description, returns the tenant identifier'''
+        #print "VIMConnector: Adding a new tenant to VIM"
+        payload_dict = {"tenant": {"name":tenant_name,"description": tenant_description, "enabled": True}}
+        payload_req = json.dumps(payload_dict)
+        try:
+            url = self.url_admin+'/tenants'
+            self.logger.info("Adding a new tenant %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_tenant_response_schema)
+            #r = self._remove_extra_items(response, new_tenant_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            tenant_id = response['tenant']['id']
+            return tenant_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def delete_tenant(self,tenant_id):
+        '''Delete a tenant from VIM. Returns the old tenant identifier'''
+        try:
+            url = self.url_admin+'/tenants/'+tenant_id
+            self.logger.info("Delete a tenant DELETE %s", url)
+            vim_response = requests.delete(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return tenant_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_tenant_list(self, filter_dict={}):
+        '''Obtain tenants of VIM
+        filter_dict can contain the following keys:
+            name: filter by tenant name
+            id: filter by tenant uuid/id
+            <other VIM specific>
+        Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
+        '''
+        filterquery=[]
+        filterquery_text=''
+        for k,v in filter_dict.items():
+            filterquery.append(str(k)+'='+str(v))
+        if len(filterquery)>0:
+            filterquery_text='?'+ '&'.join(filterquery)
+        try:
+            url = self.url+'/tenants'+filterquery_text
+            self.logger.info("get_tenant_list GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return vim_response.json()["tenants"]
+        except requests.exceptions.RequestException as e:
+            self._format_request_exception(e)
+
+    def new_network(self,net_name,net_type, shared=False, **vim_specific):
+        '''Adds a tenant network to VIM'''
+        '''Returns the network identifier'''
+        try:
+            self._get_my_tenant()
+            if net_type=="bridge":
+                net_type="bridge_data"
+            payload_req = {"name": net_name, "type": net_type, "tenant_id": self.tenant, "shared": shared}
+            payload_req.update(vim_specific)
+            url = self.url+'/networks'
+            self.logger.info("Adding a new network POST: %s  DATA: %s", url, str(payload_req))
+            print(payload_req)
+            vim_response = requests.post(url, headers = self.headers_req, data=json.dumps({"network": payload_req}) )
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_network_response_schema)
+            #r = self._remove_extra_items(response, new_network_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            network_id = response['network']['id']
+            return network_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+    def get_network_list(self, filter_dict={}):
+        '''Obtain tenant networks of VIM
+        Filter_dict can be:
+            name: network name
+            id: network uuid
+            public: boolean
+            tenant_id: tenant
+            admin_state_up: boolean
+            status: 'ACTIVE'
+        Returns the network list of dictionaries
+        '''
+        try:
+            if 'tenant_id' not in filter_dict:
+                filter_dict["tenant_id"] = self._get_my_tenant()
+            elif not filter_dict["tenant_id"]:
+                del filter_dict["tenant_id"]
+            filterquery=[]
+            filterquery_text=''
+            for k,v in filter_dict.items():
+                filterquery.append(str(k)+'='+str(v))
+            if len(filterquery)>0:
+                filterquery_text='?'+ '&'.join(filterquery)
+            url = self.url+'/networks'+filterquery_text
+            self.logger.info("Getting network list GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            return response['networks']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_network(self, net_id):
+        '''Obtain network details of network id'''
+        try:
+            url = self.url+'/networks/'+net_id
+            self.logger.info("Getting network GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            return response['network']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+            
+    def delete_network(self, net_id):
+        '''Deletes a tenant network from VIM'''
+        '''Returns the network identifier'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/networks/'+net_id
+            self.logger.info("Deleting VIM network DELETE %s", url)
+            vim_response = requests.delete(url, headers=self.headers_req)
+            self._check_http_request_response(vim_response)
+            #self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return net_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+
+    def get_flavor_list(self):
+        '''Obtain flavor details from the  VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/flavors'
+            self.logger.info("Getting flavor GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_flavors_response_schema)
+            r = self._remove_extra_items(response, get_flavors_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['flavors']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+
+    def get_flavor(self, flavor_id):
+        '''Obtain flavor details from the  VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
+            self.logger.info("Getting flavor GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_flavor_response_schema)
+            #r = self._remove_extra_items(response, get_flavor_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['flavor']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+    def new_flavor(self, flavor_data):
+        '''Adds a tenant flavor to VIM'''
+        '''Returns the flavor identifier'''
+        try:
+            self._get_my_tenant()
+            payload_req = json.dumps({'flavor': flavor_data})
+            url = self.url+'/'+self.tenant+'/flavors'
+            self.logger.info("Adding a new VIM flavor POST %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_flavor_response_schema)
+            r = self._remove_extra_items(response, new_flavor_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            flavor_id = response['flavor']['id']
+            return flavor_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def delete_flavor(self,flavor_id):
+        '''Deletes a tenant flavor from VIM'''
+        '''Returns the old flavor_id'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
+            self.logger.info("Deleting VIM flavor DELETE %s", url)
+            vim_response = requests.delete(url, headers=self.headers_req)
+            self._check_http_request_response(vim_response)
+            #self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return flavor_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_image_list(self):
+        '''Obtain image details from the  VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/images'
+            self.logger.info("Getting image GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_images_response_schema)
+            #r = self._remove_extra_items(response, get_images_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['images']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_image(self, image_id):
+        '''Obtain image details from the  VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/images/'+image_id
+            self.logger.info("Getting image GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_image_response_schema)
+            #r = self._remove_extra_items(response, get_image_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['image']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def new_image(self,image_dict):
+        ''' Adds a tenant image to VIM, returns image_id'''
+        try:
+            self._get_my_tenant()
+            new_image_dict={'name': image_dict['name']}
+            if image_dict.get('description'):
+                new_image_dict['description'] = image_dict['description']
+            if image_dict.get('metadata'):
+                new_image_dict['metadata'] = yaml.load(image_dict['metadata'])
+            if image_dict.get('location'):
+                new_image_dict['path'] = image_dict['location']
+            payload_req = json.dumps({"image":new_image_dict})
+            url=self.url + '/' + self.tenant + '/images'
+            self.logger.info("Adding a new VIM image POST %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_image_response_schema)
+            r = self._remove_extra_items(response, new_image_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            image_id = response['image']['id']
+            return image_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+            
+    def delete_image(self, image_id):
+        '''Deletes a tenant image from VIM'''
+        '''Returns the deleted image_id'''
+        try:
+            self._get_my_tenant()
+            url = self.url + '/'+ self.tenant +'/images/'+image_id
+            self.logger.info("Deleting VIM image DELETE %s", url)
+            vim_response = requests.delete(url, headers=self.headers_req)
+            self._check_http_request_response(vim_response)
+            #self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return image_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    
+    def get_image_id_from_path(self, path):
+        '''Get the image id from image path in the VIM database'''
+        try:
+            self._get_my_tenant()
+            url=self.url + '/' + self.tenant + '/images?path='+path
+            self.logger.info("Getting images GET %s", url)
+            vim_response = requests.get(url)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_images_response_schema)
+            #r = self._remove_extra_items(response, get_images_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            if len(response['images'])==0:
+                raise vimconn.vimconnNotFoundException("Image not found at VIM with path '%s'", path)
+            elif len(response['images'])>1:
+                raise vimconn.vimconnConflictException("More than one image found at VIM with path '%s'", path)
+            return response['images'][0]['id']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
    def new_vminstancefromJSON(self, vm_data):
        '''Adds a VM instance to VIM'''
        '''Returns the instance identifier'''
        # NOTE(review): the second string literal above is a no-op expression
        # statement, not part of the docstring.
        try:
            self._get_my_tenant()
        except Exception as e:
            # legacy error convention used by this method: negative HTTP code
            # plus error text, instead of raising
            return -vimconn.HTTP_Not_Found, str(e)
        print("VIMConnector: Adding a new VM instance from JSON to VIM")
        # vm_data is assumed to be the already-serialized request body
        payload_req = vm_data
        try:
            vim_response = requests.post(self.url+'/'+self.tenant+'/servers', headers = self.headers_req, data=payload_req)
        except requests.exceptions.RequestException as  e:
            print("new_vminstancefromJSON Exception: ", e.args)
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        print(vim_response)
        #print vim_response.status_code
        if vim_response.status_code == 200:
            #print vim_response.json()
            #print json.dumps(vim_response.json(), indent=4)
            # NOTE(review): the response is validated against the *image*
            # schema, not a server schema — looks like a copy/paste leftover;
            # confirm intended.
            res,http_content = self._format_in(vim_response, new_image_response_schema)
            #print http_content
            if res:
                r = self._remove_extra_items(http_content, new_image_response_schema)
                # NOTE(review): "{}" is printed literally here (print, not .format)
                if r is not None: print("Warning: remove extra items {}", r)
                #print http_content
                vminstance_id = http_content['server']['id']
                print("Tenant image id: ",vminstance_id)
                # success: (HTTP 200, new instance id)
                return vim_response.status_code,vminstance_id
            else: return -vimconn.HTTP_Bad_Request,http_content
        else:
            #print vim_response.text
            jsonerror = self._format_jsonerror(vim_response)
            text = 'Error in VIM "%s": not possible to add new vm instance. HTTP Response: %d. Error: %s' % (self.url, vim_response.status_code, jsonerror)
            #print text
            return -vim_response.status_code,text
+
    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list):
        '''Adds a VM instance to VIM
        Params:
            start: indicates if VM must start or boot in pause mode. Ignored
            image_id,flavor_id: image and flavor uuid
            net_list: list of interfaces, each one is a dictionary with:
                name:
                net_id: network uuid to connect
                vpci: virtual vcpi to assign
                model: interface model, virtio, e2000, ...
                mac_address: 
                use: 'data', 'bridge',  'mgmt'
                type: 'virtual', 'PF', 'VF', 'VFnotShared'
                vim_id: filled/added by this function
                #TODO ip, security groups
        Returns the instance identifier
        '''
        try:
            self._get_my_tenant()
#            net_list = []
#            for k,v in net_dict.items():
#                print k,v
#                net_list.append('{"name":"' + k + '", "uuid":"' + v + '"}')
#            net_list_string = ', '.join(net_list) 
            # Build the "networks" section of the payload; interfaces without a
            # net_id are left unattached and skipped here.
            virtio_net_list=[]
            for net in net_list:
                if not net.get("net_id"):
                    continue
                net_dict={'uuid': net["net_id"]}
                # only forward the optional keys that are actually present
                if net.get("type"):        net_dict["type"] = net["type"]
                if net.get("name"):        net_dict["name"] = net["name"]
                if net.get("vpci"):        net_dict["vpci"] = net["vpci"]
                if net.get("model"):       net_dict["model"] = net["model"]
                if net.get("mac_address"): net_dict["mac_address"] = net["mac_address"]
                virtio_net_list.append(net_dict)
            payload_dict={  "name":        name,
                            "description": description,
                            "imageRef":    image_id,
                            "flavorRef":   flavor_id,
                            "networks": virtio_net_list
                        }
            # "start" is optional in the openvim API; omit the key when None
            if start != None:
                payload_dict["start"] = start
            payload_req = json.dumps({"server": payload_dict})
            url = self.url+'/'+self.tenant+'/servers'
            self.logger.info("Adding a new vm POST %s DATA %s", url, payload_req)
            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
            self._check_http_request_response(vim_response)
            self.logger.debug(vim_response.text)
            response = vim_response.json()
            js_v(response, new_vminstance_response_schema)
            vminstance_id = response['server']['id']

            #connect data plane interfaces to network
            # Back-fill net["vim_id"] for each requested interface by matching
            # it against the interfaces reported in the VIM response.
            # NOTE(review): net["type"] is read with [] here — a net entry
            # without a 'type' key would raise KeyError; confirm callers always
            # provide it.
            for net in net_list:
                if net["type"]=="virtual":
                    if not net.get("net_id"):
                        continue
                    # match virtual interfaces by name first, else by net_id
                    for iface in response['server']['networks']:
                        if "name" in net:
                            if net["name"]==iface["name"]:
                                net["vim_id"] = iface['iface_id']
                                break
                        elif "net_id" in net:
                            if net["net_id"]==iface["net_id"]:
                                net["vim_id"] = iface['iface_id']
                                break
                else: #dataplane
                    # dataplane interfaces are reported per NUMA node under
                    # server.extended.numas; match by interface name
                    for numa in response['server'].get('extended',{}).get('numas',() ):
                        for iface in numa.get('interfaces',() ):
                            if net['name'] == iface['name']:
                                net['vim_id'] = iface['iface_id']
                                #Code bellow is not needed, current openvim connect dataplane interfaces 
                                #if net.get("net_id"):
                                ##connect dataplane interface
                                #    result, port_id = self.connect_port_network(iface['iface_id'], net["net_id"])
                                #    if result < 0:
                                #        error_text = "Error attaching port %s to network %s: %s." % (iface['iface_id'], net["net_id"], port_id)
                                #        print "new_vminstance: " + error_text
                                #        self.delete_vminstance(vminstance_id)
                                #        return result, error_text
                                break
        
            return vminstance_id
        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
            self._format_request_exception(e)
+
+
+    def get_vminstance_list(self):
+        '''Obtain VM instance list from the  VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/servers'
+            self.logger.info("Getting servers GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_server_response_schema)
+            r = self._remove_extra_items(response, get_server_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['servers']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+        
+    def get_vminstance(self, vm_id):
+        '''Returns the VM instance information from VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/servers/'+vm_id
+            self.logger.info("Getting vm GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_vminstance_response_schema)
+            #r = self._remove_extra_items(response, new_vminstance_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['server']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+    def delete_vminstance(self, vm_id):
+        '''Removes a VM instance from VIM, returns the deleted vm_id'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/servers/'+vm_id
+            self.logger.info("Deleting VIM vm DELETE %s", url)
+            vim_response = requests.delete(url, headers=self.headers_req)
+            self._check_http_request_response(vim_response)
+            #self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return vm_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
    def refresh_vms_status(self, vm_list):
        '''Refreshes the status of the virtual machines.

        Params:
            vm_list: iterable of VIM vm identifiers
        Returns a dict {vm_id: {status, error_msg?, vim_info, interfaces?}}
        with status mapped to MANO format via vmStatus2manoFormat; per-VM
        failures are recorded in the entry rather than aborting the loop.
        '''
        try:
            self._get_my_tenant()
        except requests.exceptions.RequestException as e:
            self._format_request_exception(e)
        vm_dict={}
        for vm_id in vm_list:
            vm={}
            #print "VIMConnector refresh_tenant_vms and nets: Getting tenant VM instance information from VIM"
            try:
                url = self.url+'/'+self.tenant+'/servers/'+ vm_id
                self.logger.info("Getting vm GET %s", url)
                vim_response = requests.get(url, headers = self.headers_req)
                self._check_http_request_response(vim_response)
                response = vim_response.json()
                js_v(response, new_vminstance_response_schema)
                # map the VIM status to MANO format; unknown values become OTHER
                if response['server']['status'] in vmStatus2manoFormat:
                    vm['status'] = vmStatus2manoFormat[ response['server']['status']  ]
                else:
                    vm['status'] = "OTHER"
                    vm['error_msg'] = "VIM status reported " + response['server']['status']
                if response['server'].get('last_error'):
                    vm['error_msg'] = response['server']['last_error']
                vm["vim_info"] = yaml.safe_dump(response['server'])
                #get interfaces info
                try:
                    management_ip = False
                    url2 = self.url+'/ports?device_id='+ vm_id
                    self.logger.info("Getting PORTS GET %s", url2)
                    vim_response2 = requests.get(url2, headers = self.headers_req)
                    self._check_http_request_response(vim_response2)
                    client_data = vim_response2.json()
                    if isinstance(client_data.get("ports"), list):
                        vm["interfaces"]=[]
                    for port in client_data.get("ports"):
                        interface={}
                        interface['vim_info']  = yaml.safe_dump(port)
                        interface["mac_address"] = port.get("mac_address")
                        interface["vim_net_id"] = port["network_id"]
                        interface["vim_interface_id"] = port["id"]
                        interface["ip_address"] = port.get("ip_address")
                        # any non-empty address counts as management reachability,
                        # even "0.0.0.0" (which is then normalized to None below)
                        if interface["ip_address"]:
                            management_ip = True
                        if interface["ip_address"] == "0.0.0.0":
                            interface["ip_address"] = None
                        vm["interfaces"].append(interface)
                        
                except Exception as e:
                    # interface retrieval is best-effort; keep the VM entry
                    self.logger.error("refresh_vms_and_nets. Port get %s: %s", type(e).__name__, str(e))

                # an ACTIVE VM with no usable IP gets a distinguishing status
                if vm['status'] == "ACTIVE" and not management_ip:
                    vm['status'] = "ACTIVE:NoMgmtIP"
                    
            except vimconn.vimconnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "DELETED"
                vm['error_msg'] = str(e)
            except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.vimconnException) as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "VIM_ERROR"
                vm['error_msg'] = str(e)
            vm_dict[vm_id] = vm
        return vm_dict
+
    def refresh_nets_status(self, net_list):
        '''Get the status of the networks
           Params: the list of network identifiers
           Returns a dictionary with:
                net_id:         #VIM id of this network
                    status:     #Mandatory. Text with one of:
                                #  DELETED (not found at vim)
                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
                                #  OTHER (Vim reported other status not understood)
                                #  ERROR (VIM indicates an ERROR status)
                                #  ACTIVE, INACTIVE, DOWN (admin down), 
                                #  BUILD (on building process)
                                #
                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)

        '''
        try:
            self._get_my_tenant()
        except requests.exceptions.RequestException as e:
            self._format_request_exception(e)
        
        net_dict={}
        for net_id in net_list:
            net = {}
            #print "VIMConnector refresh_tenant_vms_and_nets: Getting tenant network from VIM (tenant: " + str(self.tenant) + "): "
            try:
                net_vim = self.get_network(net_id)
                # map VIM status to MANO format; unknown values become OTHER
                if net_vim['status'] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[ net_vim['status'] ]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim['status']
                    
                # an administratively-down net overrides ACTIVE
                if net["status"] == "ACTIVE" and not net_vim['admin_state_up']:
                    net["status"] = "DOWN"
                if net_vim.get('last_error'):
                    net['error_msg'] = net_vim['last_error']
                net["vim_info"] = yaml.safe_dump(net_vim)
            except vimconn.vimconnNotFoundException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net['status'] = "DELETED"
                net['error_msg'] = str(e)
            except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.vimconnException) as e:
                # per-net failures are recorded, not raised
                self.logger.error("Exception getting net status: %s", str(e))
                net['status'] = "VIM_ERROR"
                net['error_msg'] = str(e)
            net_dict[net_id] = net
        return net_dict
+    
+    def action_vminstance(self, vm_id, action_dict):
+        '''Send and action over a VM instance from VIM'''
+        '''Returns the status'''
+        try:
+            self._get_my_tenant()
+            if "console" in action_dict:
+                raise vimconn.vimconnException("getting console is not available at openvim", http_code=vimconn.HTTP_Service_Unavailable)
+            url = self.url+'/'+self.tenant+'/servers/'+vm_id+"/action"
+            self.logger.info("Action over VM instance POST %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=json.dumps(action_dict) )
+            self._check_http_request_response(vim_response)
+            return vm_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+#NOT USED METHODS in current version        
+  
+    def host_vim2gui(self, host, server_dict):
+        '''Transform host dictionary from VIM format to GUI format,
+        and append to the server_dict
+        '''
+        if type(server_dict) is not dict: 
+            print('vimconnector.host_vim2gui() ERROR, param server_dict must be a dictionary')
+            return
+        RAD={}
+        occupation={}
+        for numa in host['host']['numas']:
+            RAD_item={}
+            occupation_item={}
+            #memory
+            RAD_item['memory']={'size': str(numa['memory'])+'GB', 'eligible': str(numa['hugepages'])+'GB'}
+            occupation_item['memory']= str(numa['hugepages_consumed'])+'GB'
+            #cpus
+            RAD_item['cpus']={}
+            RAD_item['cpus']['cores'] = []
+            RAD_item['cpus']['eligible_cores'] = []
+            occupation_item['cores']=[]
+            for _ in range(0, len(numa['cores']) / 2):
+                RAD_item['cpus']['cores'].append( [] )
+            for core in numa['cores']:
+                RAD_item['cpus']['cores'][core['core_id']].append(core['thread_id'])
+                if not 'status' in core: RAD_item['cpus']['eligible_cores'].append(core['thread_id'])
+                if 'instance_id' in core: occupation_item['cores'].append(core['thread_id'])
+            #ports
+            RAD_item['ports']={}
+            occupation_item['ports']={}
+            for iface in numa['interfaces']:
+                RAD_item['ports'][ iface['pci'] ] = 'speed:'+str(iface['Mbps'])+'M'
+                occupation_item['ports'][ iface['pci'] ] = { 'occupied': str(100*iface['Mbps_consumed'] / iface['Mbps']) + "%" }
+                
+            RAD[ numa['numa_socket'] ] = RAD_item
+            occupation[ numa['numa_socket'] ] = occupation_item
+        server_dict[ host['host']['name'] ] = {'RAD':RAD, 'occupation':occupation}
+
    def get_hosts_info(self):
        '''Get the information of deployed hosts
        Returns the hosts content'''
        # Legacy, print-based method (listed under "NOT USED METHODS").
        # NOTE(review): the print("... {} ...", args) calls below print the
        # braces literally and the args separately (print, not str.format).
    #obtain hosts list
        url=self.url+'/hosts'
        try:
            vim_response = requests.get(url)
        except requests.exceptions.RequestException as e:
            print("get_hosts_info Exception: {}", e.args)
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        print("vim get {}, response:{} {}",url, vim_response.status_code, vim_response.json())
        #print vim_response.status_code
        #print json.dumps(vim_response.json(), indent=4)
        if vim_response.status_code != 200:
            #TODO: get error
            print('vimconnector.get_hosts_info error getting host list %d %s',vim_response.status_code, vim_response.json())
            return -vim_response.status_code, "Error getting host list"
        
        res,hosts = self._format_in(vim_response, get_hosts_response_schema)
            
        if res==False:
            print("vimconnector.get_hosts_info error parsing GET HOSTS vim response", hosts)
            return vimconn.HTTP_Internal_Server_Error, hosts
    #obtain hosts details
        hosts_dict={}
        for host in hosts['hosts']:
            url=self.url+'/hosts/'+host['id']
            try:
                vim_response = requests.get(url)
            except requests.exceptions.RequestException as e:
                print("get_hosts_info Exception: ", e.args)
                return -vimconn.HTTP_Not_Found, str(e.args[0])
            print("vim get {} response{} {}", url,vim_response.status_code, vim_response.json())
            if vim_response.status_code != 200:
                # detail fetch failures skip the host instead of aborting
                print('vimconnector.get_hosts_info error getting detailed host %d %s', vim_response.status_code, vim_response.json())
                continue
            res,host_detail = self._format_in(vim_response, get_host_detail_response_schema)
            if res==False:
                print("vimconnector.get_hosts_info error parsing GET HOSTS/%s vim response", host['id'], host_detail)
                continue
            #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
            # accumulate GUI-format entries into hosts_dict
            self.host_vim2gui(host_detail, hosts_dict)
        return 200, hosts_dict
+
    def get_hosts(self, vim_tenant):
        '''Get the hosts and deployed instances
        Returns the hosts content'''
        # Legacy, print-based method (listed under "NOT USED METHODS").
        # Returns (200, hosts-with-instances) or (negative code, error text).
    #obtain hosts list
        url=self.url+'/hosts'
        try:
            vim_response = requests.get(url)
        except requests.exceptions.RequestException as e:
            print("get_hosts Exception: ", e.args)
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        # NOTE(review): the "{}" placeholders are printed literally (print, not format)
        print("vim get {} response:{} {}",   url , vim_response.status_code, vim_response.json())
        #print vim_response.status_code
        #print json.dumps(vim_response.json(), indent=4)
        if vim_response.status_code != 200:
            #TODO: get error
            print('vimconnector.get_hosts error getting host list %d %s', vim_response.status_code, vim_response.json())
            return -vim_response.status_code, "Error getting host list"
        
        res,hosts = self._format_in(vim_response, get_hosts_response_schema)
            
        if res==False:
            print("vimconnector.get_host error parsing GET HOSTS vim response {}", hosts)
            return vimconn.HTTP_Internal_Server_Error, hosts
    #obtain instances from hosts
        for host in hosts['hosts']:
            url=self.url+'/' + vim_tenant + '/servers?hostId='+host['id']
            try:
                vim_response = requests.get(url)
            except requests.exceptions.RequestException as e:
                print("get_hosts Exception:{}", e.args)
                return -vimconn.HTTP_Not_Found, str(e.args[0])
            print("vim get {} response: {} {}",  url, vim_response.status_code, vim_response.json())
            if vim_response.status_code != 200:
                # per-host instance lookup failures skip the host
                print('vimconnector.get_hosts error getting instances at host %d %s',vim_response.status_code, vim_response.json())
                continue
            res,servers = self._format_in(vim_response, get_server_response_schema)
            if res==False:
                print("vimconnector.get_host error parsing GET SERVERS/%s vim response",host['id'], servers)
                continue
            #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
            # annotate each host entry in place with its running instances
            host['instances'] = servers['servers']
        return 200, hosts['hosts']
+
    def get_processor_rankings(self):
        '''Get the processor rankings in the VIM database'''
        # Legacy, print-based method (listed under "NOT USED METHODS").
        # Returns (res, rankings-list) or (negative code, error text).
        url=self.url+'/processor_ranking'
        try:
            vim_response = requests.get(url)
        except requests.exceptions.RequestException as e:
            print("get_processor_rankings Exception:{}", e.args)
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        # NOTE(review): "{}" is printed literally here (print, not str.format)
        print("vim get {} response: {} {}", url, vim_response.status_code, vim_response.json())
        #print vim_response.status_code
        #print json.dumps(vim_response.json(), indent=4)
        if vim_response.status_code != 200:
            #TODO: get error
            print('vimconnector.get_processor_rankings error getting processor rankings %d %s',vim_response.status_code, vim_response.json())
            return -vim_response.status_code, "Error getting processor rankings"
        
        res,rankings = self._format_in(vim_response, get_processor_rankings_response_schema)
        return res, rankings['rankings']
+    
+    def new_host(self, host_data):
+        '''Adds a new host to VIM'''
+        '''Returns status code of the VIM response'''
+        payload_req = host_data
+        try:
+            url = self.url_admin+'/hosts'
+            self.logger.info("Adding a new host POST %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_host_response_schema)
+            r = self._remove_extra_items(response, new_host_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            host_id = response['host']['id']
+            return host_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+    
+    def new_external_port(self, port_data):
+        '''Adds a external port to VIM'''
+        '''Returns the port identifier'''
+        #TODO change to logging exception code policies
+        print("VIMConnector: Adding a new external port")
+        payload_req = port_data
+        try:
+            vim_response = requests.post(self.url_admin+'/ports', headers = self.headers_req, data=payload_req)
+        except requests.exceptions.RequestException as e:
+            self.logger.error("new_external_port Exception: ", str(e))
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print(vim_response)
+        #print vim_response.status_code
+        if vim_response.status_code == 200:
+        #print vim_response.json()
+        #print json.dumps(vim_response.json(), indent=4)
+            res, http_content = self._format_in(vim_response, new_port_response_schema)
+        #print http_content
+            if res:
+                r = self._remove_extra_items(http_content, new_port_response_schema)
+                if r is not None: print("Warning: remove extra items ", r)
+                #print http_content
+                port_id = http_content['port']['id']
+                print("Port id: {}",port_id)
+                return vim_response.status_code,port_id
+            else: return -vimconn.HTTP_Bad_Request,http_content
+        else:
+            #print vim_response.text
+            jsonerror = self._format_jsonerror(vim_response)
+            text = 'Error in VIM "%s": not possible to add new external port. HTTP Response: %d. Error: %s' % (self.url_admin, vim_response.status_code, jsonerror)
+            #print text
+            return -vim_response.status_code,text
+        
+    def new_external_network(self,net_name,net_type):
+        '''Adds a external network to VIM (shared)'''
+        '''Returns the network identifier'''
+        #TODO change to logging exception code policies
+        print("VIMConnector: Adding external shared network to VIM (type {}:{})", net_type.net_name)
+        
+        payload_req = '{"network":{"name": "' + net_name + '","shared":true,"type": "' + net_type + '"}}'
+        try:
+            vim_response = requests.post(self.url+'/networks', headers = self.headers_req, data=payload_req)
+        except requests.exceptions.RequestException as e:
+            self.logger.error( "new_external_network Exception: ", e.args)
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print(vim_response)
+        #print vim_response.status_code
+        if vim_response.status_code == 200:
+            #print vim_response.json()
+            #print json.dumps(vim_response.json(), indent=4)
+            res,http_content = self._format_in(vim_response, new_network_response_schema)
+            #print http_content
+            if res:
+                r = self._remove_extra_items(http_content, new_network_response_schema)
+                if r is not None: print("Warning: remove extra items ", r)
+                #print http_content
+                network_id = http_content['network']['id']
+                print("Network id: ",network_id)
+                return vim_response.status_code,network_id
+            else: return -vimconn.HTTP_Bad_Request,http_content
+        else:
+            #print vim_response.text
+            jsonerror = self._format_jsonerror(vim_response)
+            text = 'Error in VIM "%s": not possible to add new external network. HTTP Response: %d. Error: %s' % (self.url, vim_response.status_code, jsonerror)
+            #print text
+            return -vim_response.status_code,text
+        
    def connect_port_network(self, port_id, network_id, admin=False):
        '''Connects a external port to a network'''
        '''Returns status code of the VIM response'''
        # NOTE(review): the second string above is a no-op statement, not part
        # of the docstring. Legacy method (listed under "NOT USED METHODS").
        # Params:
        #     port_id: VIM port identifier
        #     network_id: VIM network identifier to attach the port to
        #     admin: use the admin endpoint (requires self.url_admin to be set)
        # Returns (HTTP status, port id) on success, or (negative code, error text).
        #TODO change to logging exception code policies
        print("VIMConnector: Connecting external port to network")
        
        payload_req = '{"port":{"network_id":"' + network_id + '"}}'
        if admin:
            if self.url_admin==None:
                return -vimconn.HTTP_Unauthorized, "datacenter cannot contain  admin URL"
            url= self.url_admin
        else:
            url= self.url
        try:
            vim_response = requests.put(url +'/ports/'+port_id, headers = self.headers_req, data=payload_req)
        except requests.exceptions.RequestException as e:
            # NOTE(review): "{}" is printed literally (print, not str.format)
            print("connect_port_network Exception: {}", e.args)
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        print(vim_response)
        #print vim_response.status_code
        if vim_response.status_code == 200:
            #print vim_response.json()
            #print json.dumps(vim_response.json(), indent=4)
            res,http_content = self._format_in(vim_response, new_port_response_schema)
            #print http_content
            if res:
                r = self._remove_extra_items(http_content, new_port_response_schema)
                if r is not None: print( "Warning: remove extra items ", r)
                #print http_content
                port_id = http_content['port']['id']
                print("Port id:{} ",port_id)
                return vim_response.status_code,port_id
            else: return -vimconn.HTTP_Bad_Request,http_content
        else:
            print(vim_response.text)
            jsonerror = self._format_jsonerror(vim_response)
            # NOTE(review): the error text cites self.url_admin even when the
            # non-admin URL was used — confirm intended.
            text = 'Error in VIM "%s": not possible to connect external port to network. HTTP Response: %d. Error: %s' % (self.url_admin, vim_response.status_code, jsonerror)
            print(text)
            return -vim_response.status_code,text
+        
+    def get_port(self, port_id):
+        '''Obtain port details of port id'''
+        try:
+            url = self.url+'/ports/'+port_id
+            self.logger.info("Getting port GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            return response['port']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/rwcal_openmano_vimconnector.py b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rwcal_openmano_vimconnector.py
new file mode 100644
index 0000000..aa3d971
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rwcal_openmano_vimconnector.py
@@ -0,0 +1,664 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+from gi import require_version
+require_version('RwCal', '1.0')
+import rift.rwcal.openmano_vimconnector as vimconn_openvim
+import contextlib
+import requests
+import paramiko
+import os
+import uuid
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.openmano_vimconnector')
+
+# Plugin-specific exception types; the maps below translate them to RwStatus
+# codes for the @rwstatus / @rwcalstatus decorators.
+class UnknownAccountError(Exception):
+    pass
+
+class OpenvimCALOperationFailure(Exception):
+    pass
+
+class MissingFileError(Exception):
+    pass
+
+
+class ImageLocationError(Exception):
+    pass
+
+class UninitializedPluginError(Exception):
+    pass
+
+# Exception -> RwStatus translation table; anything not listed presumably maps
+# to a generic failure inside rw_status — TODO confirm.
+rwstatus_exception_map = {IndexError: RwTypes.RwStatus.NOTFOUND,
+                          KeyError: RwTypes.RwStatus.NOTFOUND,
+                          UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+                          MissingFileError: RwTypes.RwStatus.NOTFOUND,
+                          } 
+
+rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
+rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
+
+
+class RwcalOpenmanoVimConnector(GObject.Object, RwCal.Cloud):
+    """Stub implementation the CAL VALA methods for Openmano. """
+
+    # Monotonic counter used only to give each plugin instance a distinct logger name.
+    instance_num = 1
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._driver_class = vimconn_openvim.vimconnector
+        self.log = logging.getLogger('rwcal.openmano_vimconnector.%s' % RwcalOpenmanoVimConnector.instance_num)
+        self.log.setLevel(logging.DEBUG)
+        self._rwlog_handler = None
+        # Cached tenant name; _use_driver creates the tenant on first use per account.
+        self._tenant_name = None
+        RwcalOpenmanoVimConnector.instance_num += 1
+
+    @contextlib.contextmanager
+    def _use_driver(self, account):
+        # NOTE(review): the handler check and the rwlog 'with' are commented out;
+        # the extra indentation of the try-block below is a leftover from that
+        # removed 'with' statement — confirm before re-indenting.
+        #if self._rwlog_handler is None:
+        #    raise UninitializedPluginError("Must call init() in CAL plugin before use.")
+
+        #with rwlogger.rwlog_root_handler(self._rwlog_handler):
+            try:
+                # First use for this tenant name: connect tenant-less and create
+                # the tenant on the openvim instance if it does not exist yet.
+                if self._tenant_name != account.openvim.tenant_name:
+                    tmp_drv = self._driver_class(uuid = '',
+                                  name  = '',
+                                  #tenant_id  = account.openvim.tenant_id,
+                                  tenant_id  = '',
+                                  tenant_name = '',
+                                  url   ='http://{}:{}/openvim'.format(account.openvim.host,account.openvim.port),
+                                  url_admin = '')
+                    tenant_dict = {'name':account.openvim.tenant_name}
+                    tenant_list = tmp_drv.get_tenant_list(tenant_dict)
+                    if len(tenant_list) == 0:
+                        tmp_drv.new_tenant(account.openvim.tenant_name,"default tenant")
+                        self._tenant_name = account.openvim.tenant_name 
+                    else:
+                        self._tenant_name = account.openvim.tenant_name
+                  
+                     
+                # Tenant-scoped driver handed to the caller.
+                drv = self._driver_class(uuid = '',
+                                  name  = '',
+                                  #tenant_id  = account.openvim.tenant_id,
+                                  tenant_id  = '',
+                                  tenant_name = account.openvim.tenant_name,
+                                  url   ='http://{}:{}/openvim'.format(account.openvim.host,account.openvim.port),
+                                  url_admin = '')
+
+            except Exception as e:
+                self.log.error("RwcalOpenmanoVimConnectorPlugin: VimConnector init failed. Exception: %s" %(str(e)))
+                raise
+
+            # Nothing is torn down after the yield: the driver stays usable
+            # even once the 'with' block exits.
+            yield drv
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        # Attach a single RwLogger to the module logger (idempotent across instances).
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="openmano_vimconnector",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If creds are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus()
+        url = 'http://{}:{}/openvim/'.format(account.openvim.host,account.openvim.port)
+        try:
+            r=requests.get(url,timeout=3)
+            r.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            self.log.error("OpenvimConnectorPlugin: Openvim account credential validation failed. Exception: %s", str(e))
+            status.status = "failure"
+            status.details = "Invalid Credentials: %s" % str(e)
+        except Exception as e:
+            self.log.error("OpenvimConnectorPlugin: Openvim connection failed. Exception: %s", str(e))
+            status.status = "failure"
+            status.details = "Connection Failed (Invlaid URL): %s" % str(e)
+        else:
+            self.log.debug("Openvim Successfully connected")
+            status.status = "success"
+            status.details = "Connection was successful"
+
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        with self._use_driver(account) as drv:
+            return drv.new_tenant(name, "New CAL teannt");
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        with self._use_driver(account) as drv:
+            drv.delete_tenant(tenant_id);
+
+    @staticmethod
+    def _fill_tenant_info(tenant_info):
+        """Create a GI object from tenant info dictionary
+
+        Converts tenant information dictionary object returned by openmano vimconnector
+        driver into Protobuf Gi Object
+
+        Arguments:
+            tenant_info - tenant information dictionary object
+
+        Returns:
+            The TenantInfoItem
+        """
+        tenant = RwcalYang.TenantInfoItem()
+        tenant.tenant_name = tenant_info['name']
+        tenant.tenant_id = tenant_info['id']
+        return tenant
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        # Returns a VimResources object with one TenantInfoItem per openvim tenant.
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            tenants = drv.get_tenant_list()
+        for tenant in tenants:
+            response.tenantinfo_list.append(RwcalOpenmanoVimConnector._fill_tenant_info(tenant))
+        return response
+
+    # Role management is not implemented for openvim.
+    @rwstatus
+    def do_create_role(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        # Uploads the image file to the openvim host over SFTP, then registers
+        # the uploaded path with openvim and returns the new image id.
+        with self._use_driver(account) as drv:
+            try:
+                # If the user passed in a file descriptor, use that to
+                # upload the image.
+                if image.has_field("fileno"):
+                    new_fileno = os.dup(image.fileno)
+                    hdl = os.fdopen(new_fileno, 'rb')
+                else:
+                    hdl = open(image.location, "rb")
+            except Exception as e:
+                self.log.error("Could not open file for upload. Exception received: %s", str(e))
+                raise
+
+            tpt = paramiko.Transport((account.openvim.host, 22))
+            try:
+                tpt.connect(username=account.openvim.image_management.username,
+                            password=account.openvim.image_management.password)
+            except Exception as e:
+                # NOTE(review): returns None on SSH connect failure instead of
+                # raising, and leaks the paramiko transport — confirm intended.
+                self.log.error('Could not connect to openvim host: %s. Exception: %s', account.openvim.host, e)
+                return
+
+            sftp = paramiko.SFTPClient.from_transport(tpt)
+            destination = account.openvim.image_management.image_directory_path.rstrip('/')+'/'+image.name
+            with hdl as fd:
+                try:
+                    sftp.putfo(fd, destination)
+                except Exception as e:
+                    # NOTE(review): upload failures are only warned about and the
+                    # image is still registered below — confirm intended.
+                    self.log.warn('*** Caught exception: %s: %s', e.__class__, e)
+                finally:
+                    sftp.close()
+                    tpt.close()
+
+            image_dict = {}
+            image_dict['name'] = image.name
+            image_dict['location'] = destination
+            image_id = drv.new_image(image_dict)
+        return image_id
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        # Delete the image identified by image_id.
+        with self._use_driver(account) as drv:
+            drv.delete_image(image_id)
+
+    @staticmethod
+    def _fill_image_info(img_info):
+        img = RwcalYang.ImageInfoItem()
+        img.name = img_info['name']
+        img.id = img_info['id']
+        img.location = img_info['path']
+        if img_info['status'] == 'ACTIVE':
+            img.state = 'active'
+        else:
+            img.state = 'inactive'
+        return img
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        with self._use_driver(account) as drv:
+            image = drv.get_image(image_id)
+        return RwcalOpenmanoVimConnector._fill_image_info(image)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            images = drv.get_image_list()
+        for img in images:
+            image_info = drv.get_image(img['id'])
+            response.imageinfo_list.append(RwcalOpenmanoVimConnector._fill_image_info(image_info))
+        return response
+
+    # Raw VM operations are not implemented for openvim; instantiation happens
+    # through the VDU APIs below (see do_create_vdu).
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        # Always empty: VM inventory is reported via do_get_vdu_list instead.
+        return RwcalYang.VimResources()
+
+    def _fill_flavor_create_attributes(flavor):
+        flavor_dict = dict()
+        flavor_dict['name'] = flavor.name
+        flavor_dict['ram'] = flavor.vm_flavor.memory_mb
+        flavor_dict['disk'] = flavor.vm_flavor.storage_gb
+        flavor_dict['vcpus'] = flavor.vm_flavor.vcpu_count 
+        return flavor_dict
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        # Translate the GI flavor object to an openvim dict and create it;
+        # returns the new flavor id.
+        with self._use_driver(account) as drv:
+            flavor_dict = RwcalOpenmanoVimConnector._fill_flavor_create_attributes(flavor) 
+            flavor_id = drv.new_flavor(flavor_dict)
+        return flavor_id
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        # Delete the flavor identified by flavor_id.
+        with self._use_driver(account) as drv:
+            drv.delete_flavor(flavor_id)
+
+    @staticmethod
+    def _fill_epa_attributes(flavor, flavor_info):
+        # Copy the basic compute attributes only when present and truthy.
+        if 'ram' in flavor_info and flavor_info['ram']:
+            getattr(flavor, 'vm_flavor').memory_mb   = flavor_info.get('ram',0)
+        if 'disk' in flavor_info and flavor_info['disk']:
+            getattr(flavor, 'vm_flavor').storage_gb  = flavor_info.get('disk',0)
+        if 'vcpus' in flavor_info and flavor_info['vcpus']:
+            getattr(flavor, 'vm_flavor').vcpu_count  = flavor_info.get('vcpus',0)
+
+        if not 'extended' in flavor_info or flavor_info['extended'] is None:
+            return
+        getattr(flavor,'guest_epa').numa_node_policy.node_cnt = len(flavor_info['extended']['numas'])
+        for attr in flavor_info['extended']['numas']:
+            numa_node = getattr(flavor,'guest_epa').numa_node_policy.node.add()
+            # assumes openvim reports NUMA memory in GB — TODO confirm *1024 conversion
+            numa_node.memory_mb = attr.get('memory',0)*1024
+            #getattr(flavor, 'host_epa').cpu_core_thread_count =
+
+    @staticmethod
+    def _fill_flavor_info(flavor_info):
+        # Convert an openvim flavor dictionary into a FlavorInfoItem GI object.
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name                       = flavor_info['name']
+        flavor.id                         = flavor_info['id']
+        RwcalOpenmanoVimConnector._fill_epa_attributes(flavor, flavor_info)
+        return flavor
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        # Fetch a single flavor and convert it to a GI object.
+        with self._use_driver(account) as drv:
+            flavor = drv.get_flavor(flavor_id)
+        return RwcalOpenmanoVimConnector._fill_flavor_info(flavor)
+
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            flavors = drv.get_flavor_list()
+        # NOTE(review): drv is used after the 'with' exits; harmless today since
+        # _use_driver tears nothing down, but confirm this is intended.
+        for flav in flavors:
+            flav_info = drv.get_flavor(flav['id'])
+            response.flavorinfo_list.append(RwcalOpenmanoVimConnector._fill_flavor_info(flav_info))
+        return response
+
+    # Host and standalone port management are not implemented for openvim.
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        # No standalone port listing; return an empty resource container.
+        return RwcalYang.VimResources()
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        # Creates a 'bridge_man'-type network with the given name; returns its id.
+        with self._use_driver(account) as drv:
+            network_id = drv.new_network(network.name,'bridge_man')
+            return network_id
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        # Delete the network identified by network_id.
+        with self._use_driver(account) as drv:
+            drv.delete_network(network_id)
+
+    def _fill_network_info(self, network_info):
+        # Convert an openvim network dictionary into a NetworkInfoItem GI object.
+        network                  = RwcalYang.NetworkInfoItem()
+        network.network_name     = network_info['name']
+        network.network_id       = network_info['id']
+        if ('provider:physical' in network_info) and (network_info['provider:physical']):
+            network.provider_network.physical_network = network_info['provider:physical'].upper()
+        if ('provider:vlan' in network_info) and (network_info['provider:vlan']):
+            network.provider_network.segmentation_id = network_info['provider:vlan']
+            network.provider_network.overlay_type = 'vlan'
+        return network
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        with self._use_driver(account) as drv:
+            network = drv.get_network(id)
+        return self._fill_network_info(network)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            networks = drv.get_network_list()
+        for network in networks:
+            response.networkinfo_list.append(self._fill_network_info(network))
+        return response
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        with self._use_driver(account) as drv:
+            net = dict()
+            if link_params.provider_network.physical_network is not None:
+                net['provider:physical'] = link_params.provider_network.physical_network
+            #else:
+            #    net['provider:physical'] = 'default'
+            if link_params.provider_network.overlay_type == 'VLAN' and link_params.provider_network.segmentation_id:
+                net['provider:vlan'] = link_params.provider_network.segmentation_id
+            network_id = drv.new_network(link_params.name,'bridge_man',shared=False,**net)
+            return network_id
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        with self._use_driver(account) as drv:
+            drv.delete_network(link_id)
+
+
+    @staticmethod
+    def _fill_connection_point_info(c_point, port_info):
+        # Populate a connection-point GI object from an openvim port dictionary.
+        c_point.name = port_info['name']
+        c_point.connection_point_id = port_info['id']
+        if 'ip_address' in port_info:
+                c_point.ip_address = port_info['ip_address']
+        if port_info['status'] == 'ACTIVE':
+            c_point.state = 'active'
+        else:
+            c_point.state = 'inactive'
+        if 'network_id' in port_info:
+            c_point.virtual_link_id = port_info['network_id']
+        if ('device_id' in port_info) and (port_info['device_id']):
+            c_point.vdu_id = port_info['device_id']
+
+    def _fill_virtual_link_info(self, drv, network_info):
+        # Convert an openvim network dictionary (plus its ports) into a
+        # VirtualLinkInfoParams GI object.
+        link = RwcalYang.VirtualLinkInfoParams()
+        link.name     = network_info['name']
+        link.virtual_link_id       = network_info['id']
+        if network_info['admin_state_up']:
+            link.state = 'active'
+        else:
+            link.state = 'inactive'
+        # NOTE(review): virtual_link_id is assigned a second time here — redundant.
+        link.virtual_link_id = network_info['id']
+        if ('provider:physical' in network_info) and (network_info['provider:physical']):
+            link.provider_network.physical_network = network_info['provider:physical']
+        if ('provider:vlan' in network_info) and (network_info['provider:vlan']):
+            link.provider_network.segmentation_id = network_info['provider:vlan']
+            link.provider_network.overlay_type = 'VLAN'
+
+        if 'ports' in network_info:
+            for port in network_info['ports']:
+                if 'port_id' in port:
+                    port_id = port['port_id']
+                    # Rebinds the loop variable to the full port record fetched
+                    # from the driver (port_id was saved above).
+                    port = drv.get_port(port_id)
+                    c_point = link.connection_points.add()
+                    RwcalOpenmanoVimConnector._fill_connection_point_info(c_point, port)
+        return link
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        with self._use_driver(account) as drv:
+            network = drv.get_network(link_id)
+        # NOTE(review): drv is used after the 'with' exits (harmless today since
+        # _use_driver tears nothing down) — confirm intended.
+        return self._fill_virtual_link_info(drv,network)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_virtual_link_list(self, account):
+        # One VirtualLinkInfoParams entry per openvim network.
+        response = RwcalYang.VNFResources()
+        with self._use_driver(account) as drv:
+            networks = drv.get_network_list()
+        for network in networks:
+            network_info = drv.get_network(network['id'])
+            response.virtual_link_info_list.append(self._fill_virtual_link_info(drv,network_info))
+        return response
+
+    def _match_vm_flavor(self, required, available):
+        self.log.info("Matching VM Flavor attributes required {}, available {}".format(required, available))
+        if available.vcpu_count != required.vcpu_count:
+            return False
+        if available.memory_mb != required.memory_mb:
+            return False
+        if available.storage_gb != required.storage_gb:
+            return False
+        self.log.debug("VM Flavor match found")
+        return True
+
+
+    def _select_resource_flavor(self, account, vdu_init):
+        """ 
+            Select a existing flavor if it matches the request or create new flavor
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        # Random name: the flavor only needs to be unique, not human-meaningful.
+        flavor.name = str(uuid.uuid4())
+        epa_types = ['vm_flavor', 'guest_epa', 'host_epa', 'host_aggregate', 'hypervisor_epa', 'vswitch_epa']
+        epa_dict = {k: v for k, v in vdu_init.as_dict().items() if k in epa_types}
+        flavor.from_dict(epa_dict)
+ 
+        rc, response = self.do_get_flavor_list(account)
+        if rc != RwTypes.RwStatus.SUCCESS:
+            self.log.error("Get-flavor-info-list operation failed for cloud account: %s",
+                        account.name)
+            raise OpenvimCALOperationFailure("Get-flavor-info-list operation failed for cloud account: %s" %(account.name))
+
+        flavor_id = None
+        flavor_list = response.flavorinfo_list
+        self.log.debug("Received %d flavor information from RW.CAL", len(flavor_list))
+        # Reuse the first existing flavor whose vm_flavor attributes match exactly.
+        for flv in flavor_list:
+            self.log.info("Attempting to match compute requirement for VDU: %s with flavor %s",
+                       vdu_init.name, flv)
+            if self._match_vm_flavor(flavor.vm_flavor,flv.vm_flavor):
+                self.log.info("Flavor match found for compute requirements for VDU: %s with flavor name: %s, flavor-id: %s",
+                           vdu_init.name, flv.name, flv.id)
+                return flv.id
+
+        # No match: create a new flavor only if the account allows dynamic flavors.
+        if account.openvim.dynamic_flavor_support is False:
+            self.log.error("Unable to create flavor for compute requirement for VDU: %s. VDU instantiation failed", vdu_init.name)
+            raise OpenvimCALOperationFailure("No resource available with matching EPA attributes")
+        else:
+            rc,flavor_id = self.do_create_flavor(account,flavor)
+            if rc != RwTypes.RwStatus.SUCCESS:
+                self.log.error("Create-flavor operation failed for cloud account: %s",
+                        account.name)
+                raise OpenvimCALOperationFailure("Create-flavor operation failed for cloud account: %s" %(account.name))
+            return flavor_id
+
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        with self._use_driver(account) as drv:
+            net_list = list()
+
+            if not vdu_init.has_field('flavor_id'):
+                vdu_init.flavor_id = self._select_resource_flavor(account,vdu_init)
+
+            if account.openvim.mgmt_network:
+                mgmt_net_list = drv.get_network_list()
+                mgmt_net_id = [net['id'] for net in mgmt_net_list if net['name'] == account.openvim.mgmt_network]
+                if len(mgmt_net_id) > 0:
+                    mgmt_net_dict = {}
+                    mgmt_net_dict['name'] = account.openvim.mgmt_network
+                    mgmt_net_dict['net_id'] = mgmt_net_id[0]
+                    mgmt_net_dict['type'] = 'virtual'
+                    net_list.append(mgmt_net_dict)
+                
+            for c_point in vdu_init.connection_points:
+                net_dict = {}
+                net_dict['name'] = c_point.name
+                net_dict['net_id'] = c_point.virtual_link_id
+                net_dict['type'] = 'virtual'
+                net_list.append(net_dict)
+
+            vm_id = drv.new_vminstance(vdu_init.name,vdu_init.name,None,vdu_init.image_id,vdu_init.flavor_id,net_list);
+            return vm_id
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        pass
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        if not vdu_id:
+            self.log.error("empty vdu_id during the vdu deletion")
+            return
+
+        with self._use_driver(account) as drv:
+            drv.delete_vminstance(vdu_id)
+
+    @staticmethod
+    def _fill_vdu_info(drv,account,vm_info):
+        # Convert an openvim VM dictionary into a VDUInfoParams GI object,
+        # resolving the management IP via the account's configured mgmt network.
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.name = vm_info['name']
+        vdu.vdu_id = vm_info['id']
+        mgmt_net_id = None
+        if ('image' in vm_info) and ('id' in vm_info['image']):
+            vdu.image_id = vm_info['image']['id']
+        if ('flavor' in vm_info) and ('id' in vm_info['flavor']):
+            vdu.flavor_id = vm_info['flavor']['id']
+        vdu.cloud_type  = 'openvim'
+
+        if account.openvim.mgmt_network:
+            net_list = drv.get_network_list()
+            mgmt_net_list = [net['id'] for net in net_list if net['name'] == account.openvim.mgmt_network]
+            if len(mgmt_net_list) > 0:
+                mgmt_net_id = mgmt_net_list[0]
+
+        if 'networks' in vm_info:
+            for network in vm_info['networks']:
+                port_id = network['iface_id']
+                port = drv.get_port(port_id)
+                # Ports on the mgmt network supply the management/public IP;
+                # every other port becomes a connection point.
+                if 'network_id' in port and mgmt_net_id == port['network_id'] and 'ip_address' in port:
+                    vdu.management_ip = port['ip_address']
+                    vdu.public_ip = vdu.management_ip
+                else:
+                    c_point = vdu.connection_points.add()
+                    RwcalOpenmanoVimConnector._fill_connection_point_info(c_point, port)
+
+
+        # 'active' only once the VM is up AND a management IP is known.
+        if vm_info['status'] == 'ACTIVE' and vdu.management_ip != '':
+            vdu.state = 'active'
+        elif vm_info['status'] == 'ERROR':
+            vdu.state = 'failed'
+        else:
+            vdu.state = 'inactive'
+
+        if vdu.flavor_id:
+           flavor = drv.get_flavor(vdu.flavor_id)
+           RwcalOpenmanoVimConnector._fill_epa_attributes(vdu, flavor)
+        return vdu
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        with self._use_driver(account) as drv:
+            vm_info = drv.get_vminstance(vdu_id)
+        return  RwcalOpenmanoVimConnector._fill_vdu_info(drv,account,vm_info)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_vdu_list(self, account):
+        # One VDUInfoParams entry per VM instance known to openvim.
+        vnf_resource = RwcalYang.VNFResources()
+        with self._use_driver(account) as drv:
+            vms = drv.get_vminstance_list()
+        for vm in vms:
+            vm_info = drv.get_vminstance(vm['id'])
+            vdu = RwcalOpenmanoVimConnector._fill_vdu_info(drv,account,vm_info)
+            vnf_resource.vdu_info_list.append(vdu)
+        return vnf_resource
+
diff --git a/rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt b/rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt
new file mode 100644
index 0000000..af92d7d
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt
@@ -0,0 +1,36 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+### rwcal-openstack package
+set(PKG_NAME rwcal-openstack)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+# Install the vala plugin stub and its backing python implementation.
+rift_install_python_plugin(rwcal_openstack rwcal_openstack.py)
+
+# Openstack driver python package, installed for python3 only.
+rift_python_install_tree(
+  FILES
+    rift/rwcal/openstack/__init__.py
+    rift/rwcal/openstack/openstack_drv.py
+    rift/rwcal/openstack/openstack_utils.py
+    rift/rwcal/openstack/prepare_vm.py
+  PYTHON3_ONLY
+  COMPONENT ${PKG_LONG_NAME})
+
diff --git a/rwcal/plugins/vala/rwcal_openstack/Makefile b/rwcal/plugins/vala/rwcal_openstack/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upwards(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+# NOTE(review): the argument carries literal quotes and a leading space into
+# $1; the shell strips the quotes for 'find', so this appears to work — confirm.
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py
new file mode 100644
index 0000000..3226655
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py
@@ -0,0 +1,22 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .openstack_drv import (
+        OpenstackDriver,
+        ValidationError
+        )
+from .openstack_utils import OpenstackExtraSpecUtils
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
new file mode 100644
index 0000000..2505da3
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
@@ -0,0 +1,1997 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import json
+import logging
+import ipaddress
+
+from keystoneclient import v3 as ksclientv3
+from keystoneclient.v2_0 import client as ksclientv2
+from novaclient import client as nova_client
+from neutronclient.neutron import client as ntclient
+from glanceclient.v2 import client as glclient
+from ceilometerclient import client as ceilo_client
+
+# Exceptions
+import novaclient.exceptions as NovaException
+import keystoneclient.exceptions as KeystoneExceptions
+import neutronclient.common.exceptions as NeutronException
+import glanceclient.exc as GlanceException
+
+logger = logging.getLogger('rwcal.openstack.drv')
+logger.setLevel(logging.DEBUG)
+
+class ValidationError(Exception):
+    """Driver-specific validation exception (re-exported from the package __init__)."""
+    pass
+
+
+class KeystoneDriver(object):
+    """
+    Driver base-class for keystoneclient APIs
+    """
+    def __init__(self, ksclient):
+        """
+        Constructor for KeystoneDriver base class
+        Arguments: None
+        Returns: None
+        """
+        self.ksclient = ksclient
+
+    def get_username(self):
+        """
+        Returns the username associated with keystoneclient connection
+        """
+        return self._username
+
+    def get_password(self):
+        """
+        Returns the password associated with keystoneclient connection
+        """
+        return self._password
+
+    def get_tenant_name(self):
+        """
+        Returns the tenant name associated with keystoneclient connection
+        """
+        return self._tenant_name
+
+    def _get_keystone_connection(self):
+        """
+        Returns object of class python-keystoneclient class
+        """
+        if not hasattr(self, '_keystone_connection'):
+            self._keystone_connection = self.ksclient(**self._get_keystone_credentials())
+        return self._keystone_connection
+
+    def is_auth_token_valid(self, token_expiry, time_fmt):
+        """
+        Performs validity on auth_token
+        Arguments:
+          token_expiry (string): Expiry time for token
+          time_fmt (string)    : Format for expiry string in auth_ref
+
+        Returns:
+        True/False (Boolean):  (auth_token is valid or auth_token is invalid)
+        """
+        import time
+        import datetime
+        import dateutil.parser
+        try:
+            now = datetime.datetime.timetuple(datetime.datetime.utcnow())
+            expires_at = dateutil.parser.parse(token_expiry)
+            t_now = time.mktime(now)
+            t_expiry = time.mktime(expires_at.timetuple())
+
+            if (t_expiry <= t_now) or ((t_expiry - t_now) < 300 ):
+                ### Token has expired or about to expire (5 minute)
+                delattr(self, '_keystone_connection')
+                return False
+            else:
+                return True
+        except Exception as e:
+            logger.error("Received except %s during auth_token validity check" %str(e))
+            logger.info("Can not validate the auth_token. Assuming invalid")
+            return False
+
+
+    def get_service_endpoint(self, service_type, endpoint_type):
+        """
+        Returns requested type of endpoint for requested service type
+        Arguments:
+          service_type (string): Service Type (e.g. computev3, image, network)
+          endpoint_type(string): Endpoint Type (e.g. publicURL,adminURL,internalURL)
+        Returns:
+          service_endpoint(string): Service endpoint string
+        """
+        endpoint_kwargs   = {'service_type'  : service_type,
+                             'endpoint_type' : endpoint_type}
+        try:
+            ksconn = self._get_keystone_connection()
+            service_endpoint  = ksconn.service_catalog.url_for(**endpoint_kwargs)
+        except Exception as e:
+            logger.error("OpenstackDriver: Service Catalog discovery operation failed for service_type: %s, endpoint_type: %s. Exception: %s" %(service_type, endpoint_type, str(e)))
+            raise
+        return service_endpoint
+
+
+    def get_raw_token(self):
+        """
+        Returns a valid raw_auth_token string
+
+        Returns (string): raw_auth_token string
+        """
+        ksconn = self._get_keystone_connection()
+        try:
+            raw_token = ksconn.get_raw_token_from_identity_service(auth_url = self._auth_url,
+                                                                   token    = self.get_auth_token())
+        except KeystoneExceptions.AuthorizationFailure as e:
+            logger.error("OpenstackDriver: get_raw_token_from_identity_service Failure. Exception: %s" %(str(e)))
+            return None
+
+        except Exception as e:
+            logger.error("OpenstackDriver: Could not retrieve raw_token. Exception: %s" %(str(e)))
+
+        return raw_token
+
+    def get_tenant_id(self):
+        """
+        Returns tenant_id for the project/tenant. Tenant name is provided during
+        class instantiation
+
+        Returns (string): Tenant ID
+        """
+        ksconn = self._get_keystone_connection()
+        return ksconn.tenant_id
+
+    def get_security_mode(self):
+        """
+        Returns certificate_validation policy in case of SSL/TLS connection.
+        This policy is provided during class instantiation
+
+        Returns (boolean):
+        The boolean returned are designed to match the python-client class instantiation ("insecure") value.
+        for nova/neutron/glance/keystone clients
+
+        True: No certificate validation required -- Insecure mode
+        False: Certificate validation required -- Secure mode
+        """
+        return self._insecure
+
+    def tenant_list(self):
+        """
+        Returns list of tenants
+        """
+        pass
+
+    def tenant_create(self, name):
+        """
+        Create a new tenant
+        """
+        pass
+
+    def tenant_delete(self, tenant_id):
+        """
+        Deletes a tenant identified by tenant_id
+        """
+        pass
+
+    def roles_list(self):
+        pass
+
+    def roles_create(self):
+        pass
+
+    def roles_delete(self):
+        pass
+
+class KeystoneDriverV2(KeystoneDriver):
+    """
+    Driver class for keystoneclient V2 APIs
+    """
+    def __init__(self, username, password, auth_url,tenant_name, insecure):
+        """
+        Constructor for KeystoneDriverV2 class
+        Arguments:
+        username (string)  : Username
+        password (string)  : Password
+        auth_url (string)  : Authentication URL
+        tenant_name(string): Tenant Name
+        insecure (boolean) : True to skip SSL certificate validation
+
+        Returns: None
+        """
+        self._username    = username
+        self._password    = password
+        self._auth_url    = auth_url
+        self._tenant_name = tenant_name
+        self._insecure    = insecure
+        super(KeystoneDriverV2, self).__init__(ksclientv2.Client)
+
+    def _get_keystone_credentials(self):
+        """
+        Returns the dictionary of kwargs required to instantiate python-keystoneclient class
+        """
+        creds                 = {}
+        #creds['user_domain'] = self._domain_name
+        creds['username']     = self._username
+        creds['password']     = self._password
+        creds['auth_url']     = self._auth_url
+        creds['tenant_name']  = self._tenant_name
+        creds['insecure']     = self.get_security_mode()
+        return creds
+
+    def get_auth_token(self):
+        """
+        Returns a valid auth_token
+
+        Returns (string): auth_token string
+        """
+        ksconn = self._get_keystone_connection()
+        return ksconn.auth_token
+
+    def is_auth_token_valid(self):
+        """
+        Performs validity on auth_token
+        Arguments:
+
+        Returns:
+        True/False (Boolean):  (auth_token is valid or auth_token is invalid)
+        """
+        # V2 token expiry lives under auth_ref['token']['expires']
+        ksconn = self._get_keystone_connection()
+        result = super(KeystoneDriverV2, self).is_auth_token_valid(ksconn.auth_ref['token']['expires'],
+                                                                   "%Y-%m-%dT%H:%M:%SZ")
+        return result
+
+
+class KeystoneDriverV3(KeystoneDriver):
+    """
+    Driver class for keystoneclient V3 APIs
+    """
+    def __init__(self, username, password, auth_url,tenant_name, insecure):
+        """
+        Constructor for KeystoneDriverV3 class
+        Arguments:
+        username (string)  : Username
+        password (string)  : Password
+        auth_url (string)  : Authentication URL
+        tenant_name(string): Tenant Name
+
+        Returns: None
+        """
+        self._username    = username
+        self._password    = password
+        self._auth_url    = auth_url
+        self._tenant_name = tenant_name
+        self._insecure    = insecure
+        super(KeystoneDriverV3, self).__init__(ksclientv3.Client)
+
+    def _get_keystone_credentials(self):
+        """
+        Returns the dictionary of kwargs required to instantiate python-keystoneclient class
+        """
+        creds                 = {}
+        #creds['user_domain'] = self._domain_name
+        creds['username']     = self._username
+        creds['password']     = self._password
+        creds['auth_url']     = self._auth_url
+        creds['project_name'] = self._tenant_name
+        creds['insecure']     = self._insecure
+        return creds
+
+    def get_auth_token(self):
+        """
+        Returns a valid auth_token
+
+        Returns (string): auth_token string
+        """
+        ksconn = self._get_keystone_connection()
+        return ksconn.auth_ref['auth_token']
+
+    def is_auth_token_valid(self):
+        """
+        Performs validity on auth_token
+        Arguments:
+
+        Returns:
+        True/False (Boolean):  (auth_token is valid or auth_token is invalid)
+        """
+        ksconn = self._get_keystone_connection()
+        result = super(KeystoneDriverV3, self).is_auth_token_valid(ksconn.auth_ref['expires_at'],
+                                                                   "%Y-%m-%dT%H:%M:%S.%fZ")
+        return result
+
+class NovaDriver(object):
+    """
+    Driver for openstack nova_client
+    """
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for NovaDriver
+        Arguments:
+          ks_drv (KeystoneDriver): driver used for auth tokens and endpoint lookup
+          service_name (string)  : service-catalog type (e.g. 'compute', 'computev21')
+          version (string)       : novaclient API version (e.g. '2.0', '2.1')
+        """
+        self.ks_drv = ks_drv
+        self._service_name = service_name
+        self._version = version
+
+    def _get_nova_credentials(self):
+        """
+        Returns a dictionary of kwargs required to instantiate python-novaclient class
+        """
+        creds               = {}
+        creds['version']    = self._version
+        # Pin the client to the catalog endpoint resolved via keystone
+        creds['bypass_url'] = self.ks_drv.get_service_endpoint(self._service_name, "publicURL")
+        creds['username']   = self.ks_drv.get_username()
+        creds['project_id'] = self.ks_drv.get_tenant_name()
+        # Re-use the keystone-issued token instead of re-authenticating
+        creds['auth_token'] = self.ks_drv.get_auth_token()
+        creds['insecure']   = self.ks_drv.get_security_mode()
+        return creds
+
+    def _get_nova_connection(self):
+        """
+        Returns an object of class python-novaclient
+
+        The connection is created on first use and cached; it is rebuilt
+        whenever the keystone auth token is found to be invalid/expired.
+        """
+        if not hasattr(self, '_nova_connection'):
+            self._nova_connection = nova_client.Client(**self._get_nova_credentials())
+        else:
+            # Reinitialize if auth_token is no longer valid
+            if not self.ks_drv.is_auth_token_valid():
+                self._nova_connection = nova_client.Client(**self._get_nova_credentials())
+        return self._nova_connection
+
+    def _flavor_get(self, flavor_id):
+        """
+        Get flavor by flavor_id
+        Arguments:
+           flavor_id(string): UUID of flavor_id
+
+        Returns:
+        dictionary of flavor parameters, with EPA attributes merged in
+        under the 'extra_specs' key
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn = self._get_nova_connection()
+        try:
+            flavor = nvconn.flavors.get(flavor_id)
+        except Exception as e:
+            logger.info("OpenstackDriver: Did not find flavor with flavor_id : %s. Exception: %s"%(flavor_id, str(e)))
+            raise
+
+        # Second API round-trip to fetch the flavor's extra_specs (EPA attributes)
+        try:
+            extra_specs = flavor.get_keys()
+        except Exception as e:
+            logger.info("OpenstackDriver: Could not get the EPA attributes for flavor with flavor_id : %s. Exception: %s"%(flavor_id, str(e)))
+            raise
+
+        response = flavor.to_dict()
+        # NOTE(review): assert is stripped under `python -O`; the guard against
+        # a pre-existing 'extra_specs' key would silently vanish there.
+        assert 'extra_specs' not in response, "Key extra_specs present as flavor attribute"
+        response['extra_specs'] = extra_specs
+        return response
+
+    def flavor_get(self, flavor_id):
+        """
+        Get flavor by flavor_id
+        Arguments:
+           flavor_id(string): UUID of flavor_id
+
+        Returns:
+        dictionary of flavor parameters (public wrapper over _flavor_get)
+        """
+        return self._flavor_get(flavor_id)
+
+    def flavor_list(self):
+        """
+        Returns list of all flavors (dictionary per flavor)
+
+        Arguments:
+           None
+        Returns:
+           A list of dictionaries. Each dictionary contains attributes for a single flavor instance
+        """
+        flavors = []
+        flavor_info = []
+        nvconn =  self._get_nova_connection()
+        try:
+            flavors = nvconn.flavors.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: List Flavor operation failed. Exception: %s"%(str(e)))
+            raise
+        if flavors:
+            # One extra API round-trip per flavor (flavor_get fetches extra_specs)
+            flavor_info = [ self.flavor_get(flv.id) for flv in flavors ]
+        return flavor_info
+
+    def flavor_create(self, name, ram, vcpu, disk, extra_specs):
+        """
+        Create a new flavor
+
+        Arguments:
+           name   (string):  Name of the new flavor
+           ram    (int)   :  Memory in MB
+           vcpu   (int)   :  Number of VCPUs
+           disk   (int)   :  Secondary storage size in GB
+           extra_specs (dictionary): EPA attributes dictionary
+
+        Returns:
+           flavor_id (string): UUID of flavor created
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            flavor = nvconn.flavors.create(name        = name,
+                                           ram         = ram,
+                                           vcpus       = vcpu,
+                                           disk        = disk,
+                                           flavorid    = 'auto',
+                                           ephemeral   = 0,
+                                           swap        = 0,
+                                           rxtx_factor = 1.0,
+                                           is_public    = True)
+        except Exception as e:
+            logger.error("OpenstackDriver: Create Flavor operation failed. Exception: %s"%(str(e)))
+            raise
+
+        # extra_specs are applied after creation; if set_keys fails the flavor
+        # already exists and is NOT rolled back here.
+        if extra_specs:
+            try:
+                flavor.set_keys(extra_specs)
+            except Exception as e:
+                logger.error("OpenstackDriver: Set Key operation failed for flavor: %s. Exception: %s" %(flavor.id, str(e)))
+                raise
+        return flavor.id
+
+    def flavor_delete(self, flavor_id):
+        """
+        Deletes a flavor identified by flavor_id
+
+        Arguments:
+           flavor_id (string):  UUID of flavor to be deleted
+
+        Returns: None
+        Raises: re-raises novaclient exceptions after logging
+        """
+        # NOTE(review): this assert doubles as an existence check (it performs a
+        # real API lookup); under `python -O` it is stripped entirely.
+        assert flavor_id == self._flavor_get(flavor_id)['id']
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.flavors.delete(flavor_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete flavor operation failed for flavor: %s. Exception: %s" %(flavor_id, str(e)))
+            raise
+
+
+    def server_list(self):
+        """
+        Returns a list of available VMs for the project
+
+        Arguments: None
+
+        Returns:
+           A list of dictionaries. Each dictionary contains attributes associated
+           with individual VM
+        Raises: re-raises novaclient exceptions after logging
+        """
+        servers     = []
+        server_info = []
+        nvconn      = self._get_nova_connection()
+        try:
+            servers     = nvconn.servers.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: List Server operation failed. Exception: %s" %(str(e)))
+            raise
+        server_info = [ server.to_dict() for server in servers]
+        return server_info
+
+    def _nova_server_get(self, server_id):
+        """
+        Returns a dictionary of attributes associated with VM identified by server_id
+
+        Arguments:
+          server_id (string): UUID of the VM/server for which information is requested
+
+        Returns:
+          A dictionary object with attributes associated with VM identified by server_id
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn = self._get_nova_connection()
+        try:
+            server = nvconn.servers.get(server = server_id)
+        except Exception as e:
+            logger.info("OpenstackDriver: Get Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+        else:
+            return server.to_dict()
+
+    def server_get(self, server_id):
+        """
+        Returns a dictionary of attributes associated with VM identified by server_id
+
+        Arguments:
+          server_id (string): UUID of the VM/server for which information is requested
+
+        Returns:
+          A dictionary object with attributes associated with VM identified by server_id
+          (public wrapper over _nova_server_get)
+        """
+        return self._nova_server_get(server_id)
+
+    def server_create(self, **kwargs):
+        """
+        Creates a new VM/server instance
+
+        Arguments:
+          A dictionary of following key-value pairs
+         {
+           name (string)              : Name of the VM/Server
+           flavor_id  (string)        : UUID of the flavor to be used for VM
+           image_id   (string)        : UUID of the image to be used VM/Server instance
+           network_list(List)         : A List of network_ids. A port will be created in these networks
+           port_list (List)           : A List of port-ids. These ports will be added to VM.
+           metadata   (dict)          : A dictionary of arbitrary key-value pairs associated with VM/server
+           userdata   (string)        : A script which shall be executed during first boot of the VM
+           security_groups (List)     : Security groups to apply to the VM
+           availability_zone (string) : A name of the availability zone where instance should be launched
+           scheduler_hints (string)   : Openstack scheduler_hints to be passed to nova scheduler
+         }
+         Only network_list and port_list are optional; the remaining keys are
+         read directly (kwargs[...]) and raise KeyError if absent.
+        Returns:
+          server_id (string): UUID of the VM/server created
+
+        """
+        # Build NIC specs from networks (new ports) and pre-created ports
+        nics = []
+        if 'network_list' in kwargs:
+            for network_id in kwargs['network_list']:
+                nics.append({'net-id': network_id})
+
+        if 'port_list' in kwargs:
+            for port_id in kwargs['port_list']:
+                nics.append({'port-id': port_id})
+
+        nvconn = self._get_nova_connection()
+
+        try:
+            server = nvconn.servers.create(kwargs['name'],
+                                           kwargs['image_id'],
+                                           kwargs['flavor_id'],
+                                           meta                 = kwargs['metadata'],
+                                           files                = None,
+                                           reservation_id       = None,
+                                           min_count            = None,
+                                           max_count            = None,
+                                           userdata             = kwargs['userdata'],
+                                           security_groups      = kwargs['security_groups'],
+                                           availability_zone    = kwargs['availability_zone'],
+                                           block_device_mapping = None,
+                                           nics                 = nics,
+                                           scheduler_hints      = kwargs['scheduler_hints'],
+                                           config_drive         = None)
+        except Exception as e:
+            logger.info("OpenstackDriver: Create Server operation failed. Exception: %s" %(str(e)))
+            raise
+        return server.to_dict()['id']
+
+    def server_delete(self, server_id):
+        """
+        Deletes a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be deleted
+
+        Returns: None
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.delete(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_start(self, server_id):
+        """
+        Starts a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be started
+
+        Returns: None
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.start(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Start Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_stop(self, server_id):
+        """
+        Stops a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be stopped
+
+        Returns: None
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.stop(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Stop Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_pause(self, server_id):
+        """
+        Pauses a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be paused
+
+        Returns: None
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.pause(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Pause Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_unpause(self, server_id):
+        """
+        Arguments:
+           server_id (string): UUID of the server to be unpaused
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.unpause(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Resume Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+
+    def server_suspend(self, server_id):
+        """
+        Arguments:
+           server_id (string): UUID of the server to be suspended
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.suspend(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Suspend Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+
+
+    def server_resume(self, server_id):
+        """
+        Resumes a suspended server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be resumed
+
+        Returns: None
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.resume(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Resume Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_reboot(self, server_id, reboot_type):
+        """
+        Reboots a server identified by server_id
+
+        Arguments:
+           server_id (string) : UUID of the server to be rebooted
+           reboot_type(string):
+                         'SOFT': Soft Reboot
+                         'HARD': Hard Reboot
+        Returns: None
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.reboot(server_id, reboot_type)
+        except Exception as e:
+            logger.error("OpenstackDriver: Reboot Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_console(self, server_id, console_type = 'novnc'):
+        """
+        Fetches VNC console access information for a server
+
+        Arguments:
+           server_id (string) : UUID of the server
+           console_type(string):
+                               'novnc',
+                               'xvpvnc'
+        Returns:
+          A dictionary object response for console information
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            console_info = nvconn.servers.get_vnc_console(server_id, console_type)
+        except Exception as e:
+            logger.error("OpenstackDriver: Server Get-Console operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+        return console_info
+
+    def server_rebuild(self, server_id, image_id):
+        """
+        Rebuilds a server with a (possibly different) image
+
+        Arguments:
+           server_id (string) : UUID of the server to be rebuilt
+           image_id (string)  : UUID of the image to use
+        Returns: None
+        Raises: re-raises novaclient exceptions after logging
+        """
+
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.rebuild(server_id, image_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Rebuild Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+
+    def server_add_port(self, server_id, port_id):
+        """
+        Attaches an existing neutron port to a server
+
+        Arguments:
+           server_id (string): UUID of the server
+           port_id   (string): UUID of the port to be attached
+
+        Returns: None
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            # net_id/fixed_ip are None because a pre-created port is supplied
+            nvconn.servers.interface_attach(server_id,
+                                            port_id,
+                                            net_id = None,
+                                            fixed_ip = None)
+        except Exception as e:
+            logger.error("OpenstackDriver: Server Port Add operation failed for server_id : %s, port_id : %s. Exception: %s" %(server_id, port_id, str(e)))
+            raise
+
+    def server_delete_port(self, server_id, port_id):
+        """
+        Detaches a neutron port from a server
+
+        Arguments:
+           server_id (string): UUID of the server
+           port_id   (string): UUID of the port to be detached
+        Returns: None
+        Raises: re-raises novaclient exceptions after logging
+
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.interface_detach(server_id, port_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Server Port Delete operation failed for server_id : %s, port_id : %s. Exception: %s" %(server_id, port_id, str(e)))
+            raise
+
+    def floating_ip_list(self):
+        """
+        Lists all floating IPs visible to the project
+
+        Arguments:
+            None
+        Returns:
+            List of objects of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            ip_list = nvconn.floating_ips.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: Floating IP List operation failed. Exception: %s" %str(e))
+            raise
+
+        return ip_list
+
+    def floating_ip_create(self, pool):
+        """
+        Allocates a new floating IP from a pool
+
+        Arguments:
+           pool (string): Name of the pool (optional; may be None)
+        Returns:
+           An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            floating_ip = nvconn.floating_ips.create(pool)
+        except Exception as e:
+            logger.error("OpenstackDriver: Floating IP Create operation failed. Exception: %s"  %str(e))
+            raise
+
+        return floating_ip
+
+    def floating_ip_delete(self, floating_ip):
+        """
+        Deallocates a floating IP
+
+        Arguments:
+           floating_ip: An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        Returns:
+           None
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            # Rebinds the local name to delete()'s return value; harmless since
+            # nothing is returned to the caller.
+            floating_ip = nvconn.floating_ips.delete(floating_ip)
+        except Exception as e:
+            logger.error("OpenstackDriver: Floating IP Delete operation failed. Exception: %s"  %str(e))
+            raise
+
+    def floating_ip_assign(self, server_id, floating_ip, fixed_ip):
+        """
+        Associates a floating IP with a server's fixed IP
+
+        Arguments:
+           server_id (string)  : UUID of the server
+           floating_ip (string): IP address string for floating-ip
+           fixed_ip (string)   : IP address string for the fixed-ip with which floating ip will be associated
+        Returns:
+           None
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.add_floating_ip(server_id, floating_ip, fixed_ip)
+        except Exception as e:
+            logger.error("OpenstackDriver: Assign Floating IP operation failed. Exception: %s"  %str(e))
+            raise
+
+    def floating_ip_release(self, server_id, floating_ip):
+        """
+        Disassociates a floating IP from a server
+
+        Arguments:
+           server_id (string)  : UUID of the server
+           floating_ip (string): IP address string for floating-ip
+        Returns:
+           None
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.remove_floating_ip(server_id, floating_ip)
+        except Exception as e:
+            logger.error("OpenstackDriver: Release Floating IP operation failed. Exception: %s"  %str(e))
+            raise
+
+    def group_list(self):
+        """
+        List of Server Affinity and Anti-Affinity Groups
+
+        Arguments:
+            None
+        Returns:
+           List of dictionary objects where dictionary is representation of class (novaclient.v2.server_groups.ServerGroup)
+        Raises: re-raises novaclient exceptions after logging
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            group_list = nvconn.server_groups.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: Server Group List operation failed. Exception: %s"  %str(e))
+            raise
+
+        group_info = [ group.to_dict() for group in group_list ]
+        return group_info
+
+
+
+class NovaDriverV2(NovaDriver):
+    """
+    Driver class for novaclient V2 APIs
+    """
+    def __init__(self, ks_drv):
+        """
+        Constructor for NovaDriverV2
+        Arguments:
+          ks_drv (KeystoneDriver): keystone driver for tokens/endpoints
+        """
+        # Service type 'compute', API version '2.0'
+        super(NovaDriverV2, self).__init__(ks_drv, 'compute', '2.0')
+
+class NovaDriverV21(NovaDriver):
+    """
+    Driver class for novaclient V2.1 APIs
+    """
+    def __init__(self, ks_drv):
+        """
+        Constructor for NovaDriverV21
+        Arguments:
+          ks_drv (KeystoneDriver): keystone driver for tokens/endpoints
+        """
+        # Service type 'computev21', API version '2.1'
+        super(NovaDriverV21, self).__init__(ks_drv, 'computev21', '2.1')
+
+class GlanceDriver(object):
+    """
+    Driver for openstack glance-client
+    """
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for GlanceDriver
+        Arguments:
+          ks_drv (KeystoneDriver): driver used for auth tokens and endpoint lookup
+          service_name (string)  : service-catalog type (e.g. 'image')
+          version (string)       : glanceclient API version
+        """
+        self.ks_drv = ks_drv
+        self._service_name = service_name
+        self._version = version
+
+    def _get_glance_credentials(self):
+        """
+        Returns a dictionary of kwargs required to instantiate python-glanceclient class
+
+        Arguments: None
+
+        Returns:
+           A dictionary object of arguments
+        """
+        creds             = {}
+        creds['version']  = self._version
+        creds['endpoint'] = self.ks_drv.get_service_endpoint(self._service_name, 'publicURL')
+        creds['token']    = self.ks_drv.get_auth_token()
+        creds['insecure'] = self.ks_drv.get_security_mode()
+        return creds
+
+    def _get_glance_connection(self):
+        """
+        Returns a object of class python-glanceclient
+        """
+        if not hasattr(self, '_glance_connection'):
+            self._glance_connection = glclient.Client(**self._get_glance_credentials())
+        else:
+            # Reinitialize if auth_token is no longer valid
+            if not self.ks_drv.is_auth_token_valid():
+                self._glance_connection = glclient.Client(**self._get_glance_credentials())
+        return self._glance_connection
+
+    def image_list(self):
+        """
+        Returns list of dictionaries. Each dictionary contains attributes associated with
+        image
+
+        Arguments: None
+
+        Returns: List of dictionaries.
+        """
+        glconn = self._get_glance_connection()
+        images = []
+        try:
+            image_info = glconn.images.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: List Image operation failed. Exception: %s" %(str(e)))
+            raise
+        images = [ img for img in image_info ]
+        return images
+
+    def image_create(self, **kwargs):
+        """
+        Creates an image
+        Arguments:
+           A dictionary of kwargs with following keys
+           {
+              'name'(string)         : Name of the image
+              'location'(string)     : URL (http://....) where image is located
+              'disk_format'(string)  : Disk format
+                    Possible values are 'ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'
+              'container_format'(string): Container format
+                                       Possible values are 'ami', 'ari', 'aki', 'bare', 'ovf'
+              'tags'                 : A list of user tags
+              'checksum'             : The image md5 checksum
+           }
+        Returns:
+           image_id (string)  : UUID of the image
+
+        """
+        glconn = self._get_glance_connection()
+        try:
+            image = glconn.images.create(**kwargs)
+        except Exception as e:
+            logger.error("OpenstackDriver: Create Image operation failed. Exception: %s" %(str(e)))
+            raise
+
+        return image.id
+
+    def image_upload(self, image_id, fd):
+        """
+        Upload the image
+
+        Arguments:
+            image_id: UUID of the image
+            fd      : File descriptor for the image file
+        Returns: None
+        """
+        glconn = self._get_glance_connection()
+        try:
+            glconn.images.upload(image_id, fd)
+        except Exception as e:
+            logger.error("OpenstackDriver: Image upload operation failed. Exception: %s" %(str(e)))
+            raise
+
+    def image_add_location(self, image_id, location, metadata):
+        """
+        Add image URL location
+
+        Arguments:
+           image_id : UUID of the image
+           location : http URL for the image
+
+        Returns: None
+        """
+        glconn = self._get_glance_connection()
+        try:
+            image = glconn.images.add_location(image_id, location, metadata)
+        except Exception as e:
+            logger.error("OpenstackDriver: Image location add operation failed. Exception: %s" %(str(e)))
+            raise
+
+    def image_update(self):
+        # Not implemented; placeholder kept for API symmetry with the other
+        # image_* operations.
+        pass
+
+    def image_delete(self, image_id):
+        """
+        Delete an image
+
+        Arguments:
+           image_id: UUID of the image
+
+        Returns: None
+
+        """
+        assert image_id == self._image_get(image_id)['id']
+        glconn = self._get_glance_connection()
+        try:
+            glconn.images.delete(image_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete Image operation failed for image_id : %s. Exception: %s" %(image_id, str(e)))
+            raise
+
+
+    def _image_get(self, image_id):
+        """
+        Returns a dictionary object of VM image attributes
+
+        Arguments:
+           image_id (string): UUID of the image
+
+        Returns:
+           A dictionary of the image attributes
+        """
+        glconn = self._get_glance_connection()
+        try:
+            image = glconn.images.get(image_id)
+        except GlanceException.HTTPBadRequest:
+            # RIFT-14241: The get image request occasionally returns the below message.  Retry in case of bad request exception.
+            # Error code 400.: Message: Bad request syntax ('0').: Error code explanation: 400 = Bad request syntax or unsupported method. (HTTP 400)
+            # Note: this is a single retry; a second HTTPBadRequest is not
+            # caught here and propagates to the caller.
+            logger.warning("OpenstackDriver: Got bad request response during get_image request.  Retrying.")
+            image = glconn.images.get(image_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Get Image operation failed for image_id : %s. Exception: %s" %(image_id, str(e)))
+            raise
+
+        return image
+
+    def image_get(self, image_id):
+        """
+        Returns a dictionary object of VM image attributes
+
+        Arguments:
+           image_id (string): UUID of the image
+
+        Returns:
+           A dictionary of the image attributes
+        """
+        return self._image_get(image_id)
+
+class GlanceDriverV2(GlanceDriver):
+    """
+    Driver for openstack glance-client V2
+    """
+    def __init__(self, ks_drv):
+        super(GlanceDriverV2, self).__init__(ks_drv, 'image', 2)
+
+class NeutronDriver(object):
+    """
+    Driver for openstack neutron neutron-client
+    """
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for NeutronDriver
+
+        Arguments:
+           ks_drv       : KeystoneDriver instance used for tokens/endpoints
+           service_name : neutron service name in the keystone catalog
+           version      : neutron API version passed to the client
+        """
+        self.ks_drv = ks_drv
+        self._service_name = service_name
+        self._version = version
+
+    def _get_neutron_credentials(self):
+        """
+        Returns a dictionary of kwargs required to instantiate python-neutronclient class
+
+        Returns:
+          Dictionary of kwargs
+        """
+        creds                 = {}
+        creds['api_version']  = self._version
+        creds['endpoint_url'] = self.ks_drv.get_service_endpoint(self._service_name, 'publicURL')
+        creds['token']        = self.ks_drv.get_auth_token()
+        creds['tenant_name']  = self.ks_drv.get_tenant_name()
+        creds['insecure']     = self.ks_drv.get_security_mode()
+        return creds
+
+    def _get_neutron_connection(self):
+        """
+        Returns an object of class python-neutronclient
+        """
+        if not hasattr(self, '_neutron_connection'):
+            self._neutron_connection = ntclient.Client(**self._get_neutron_credentials())
+        else:
+            # Reinitialize if auth_token is no longer valid
+            if not self.ks_drv.is_auth_token_valid():
+                self._neutron_connection = ntclient.Client(**self._get_neutron_credentials())
+        return self._neutron_connection
+
+    def network_list(self):
+        """
+        Returns list of dictionaries. Each dictionary contains the attributes for a network
+        under project
+
+        Arguments: None
+
+        Returns:
+          A list of dictionaries
+        """
+        networks = []
+        ntconn   = self._get_neutron_connection()
+        try:
+            networks = ntconn.list_networks()
+        except Exception as e:
+            logger.error("OpenstackDriver: List Network operation failed. Exception: %s" %(str(e)))
+            raise
+        return networks['networks']
+
+    def network_create(self, **kwargs):
+        """
+        Creates a new network for the project
+
+        Arguments:
+          A dictionary with following key-values
+        {
+          name (string)              : Name of the network
+          admin_state_up(Boolean)    : True/False
+          external_router(Boolean)   : Connectivity with external router. True/False
+          shared(Boolean)            : Shared among tenants. True/False
+          physical_network(string)   : The physical network where this network object is implemented (optional).
+          network_type               : The type of physical network that maps to this network resource (optional).
+                                       Possible values are: 'flat', 'vlan', 'vxlan', 'gre'
+          segmentation_id            : An isolated segment on the physical network. The network_type attribute
+                                       defines the segmentation model. For example, if the network_type value
+                                       is vlan, this ID is a vlan identifier. If the network_type value is gre,
+                                       this ID is a gre key.
+        }
+
+        Returns:
+           network_id (string): UUID of the created network
+        """
+        # name/admin_state_up/shared/external_router are read
+        # unconditionally, so callers must always supply them.
+        params = {'network':
+                  {'name'                 : kwargs['name'],
+                   'admin_state_up'       : kwargs['admin_state_up'],
+                   'tenant_id'            : self.ks_drv.get_tenant_id(),
+                   'shared'               : kwargs['shared'],
+                   #'port_security_enabled': port_security_enabled,
+                   'router:external'      : kwargs['external_router']}}
+
+        # provider:* attributes are optional and only included when given.
+        if 'physical_network' in kwargs:
+            params['network']['provider:physical_network'] = kwargs['physical_network']
+        if 'network_type' in kwargs:
+            params['network']['provider:network_type'] = kwargs['network_type']
+        if 'segmentation_id' in kwargs:
+            params['network']['provider:segmentation_id'] = kwargs['segmentation_id']
+
+        ntconn = self._get_neutron_connection()
+        try:
+            logger.debug("Calling neutron create_network() with params: %s", str(params))
+            net = ntconn.create_network(params)
+        except Exception as e:
+            logger.error("OpenstackDriver: Create Network operation failed. Exception: %s" %(str(e)))
+            raise
+        logger.debug("Got create_network response from neutron connection: %s", str(net))
+        network_id = net['network']['id']
+        if not network_id:
+            raise Exception("Empty network id returned from create_network. (params: %s)" % str(params))
+
+        return network_id
+
+    def network_delete(self, network_id):
+        """
+        Deletes a network identified by network_id
+
+        Arguments:
+          network_id (string): UUID of the network
+
+        Returns: None
+        """
+        assert network_id == self._network_get(network_id)['id']
+        ntconn = self._get_neutron_connection()
+        try:
+            ntconn.delete_network(network_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete Network operation failed. Exception: %s" %(str(e)))
+            raise
+
+    def _network_get(self, network_id):
+        """
+        Returns a dictionary object describing the attributes of the network
+
+        Arguments:
+           network_id (string): UUID of the network
+
+        Returns:
+           A dictionary object of the network attributes
+        """
+        ntconn = self._get_neutron_connection()
+        network = ntconn.list_networks(id = network_id)['networks']
+        if not network:
+            raise NeutronException.NotFound("Network with id %s not found"%(network_id))
+
+        return network[0]
+
+    def network_get(self, network_id):
+        """
+        Returns a dictionary object describing the attributes of the network
+
+        Arguments:
+           network_id (string): UUID of the network
+
+        Returns:
+           A dictionary object of the network attributes
+        """
+        return self._network_get(network_id)
+
+    def subnet_create(self, **kwargs):
+        """
+        Creates a subnet on the network
+
+        Arguments:
+        A dictionary with following key value pairs
+        {
+          network_id(string)  : UUID of the network where subnet needs to be created
+          cidr(string)        : IPv4 address prefix (e.g. '1.1.1.0/24') for the subnet
+                                (only read when no 'subnetpool_id' is supplied)
+          ip_version (integer): 4 for IPv4 and 6 for IPv6
+        }
+
+        Optional keys: subnetpool_id, gateway_ip, dhcp_params, dns_server.
+
+        Returns:
+           subnet_id (string): UUID of the created subnet
+        """
+        params = {}
+        params['network_id'] = kwargs['network_id']
+        params['ip_version'] = kwargs['ip_version']
+
+        # if params['ip_version'] == 6:
+        #     assert 0, "IPv6 is not supported"
+
+        # Either allocate from a subnet pool or use an explicit CIDR.
+        if 'subnetpool_id' in kwargs:
+            params['subnetpool_id'] = kwargs['subnetpool_id']
+        else:
+            params['cidr'] = kwargs['cidr']
+
+        # gateway_ip=None explicitly requests a subnet without a gateway.
+        if 'gateway_ip' in kwargs:
+            params['gateway_ip'] = kwargs['gateway_ip']
+        else:
+            params['gateway_ip'] = None
+
+        if 'dhcp_params' in kwargs:
+            params['enable_dhcp'] = kwargs['dhcp_params']['enable_dhcp']
+            if 'start_address' in kwargs['dhcp_params'] and 'count' in kwargs['dhcp_params']:
+                # NOTE(review): end = start + count yields count+1 addresses
+                # if the pool 'end' is inclusive -- confirm whether this
+                # off-by-one is intended.
+                end_address = (ipaddress.IPv4Address(kwargs['dhcp_params']['start_address']) + kwargs['dhcp_params']['count']).compressed
+                params['allocation_pools'] = [ {'start': kwargs['dhcp_params']['start_address'] ,
+                                                'end' : end_address} ]
+
+        if 'dns_server' in kwargs:
+            params['dns_nameservers'] = []
+            for server in kwargs['dns_server']:
+                params['dns_nameservers'].append(server)
+
+        ntconn = self._get_neutron_connection()
+        try:
+            # Bulk-create request form; the response mirrors it as
+            # {'subnets': [...]}.
+            subnet = ntconn.create_subnet({'subnets': [params]})
+        except Exception as e:
+            logger.error("OpenstackDriver: Create Subnet operation failed. Exception: %s" %(str(e)))
+            raise
+
+        return subnet['subnets'][0]['id']
+
+    def subnet_list(self):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing the subnet
+
+        Arguments: None
+
+        Returns:
+           A dictionary of the objects of subnet attributes
+        """
+        ntconn = self._get_neutron_connection()
+        try:
+            subnets = ntconn.list_subnets()['subnets']
+        except Exception as e:
+            logger.error("OpenstackDriver: List Subnet operation failed. Exception: %s" %(str(e)))
+            raise
+        return subnets
+
+    def _subnet_get(self, subnet_id):
+        """
+        Returns a dictionary object describing the attributes of a subnet.
+
+        Arguments:
+           subnet_id (string): UUID of the subnet
+
+        Returns:
+           A dictionary object of the subnet attributes, or the placeholder
+           {'cidr': ''} when no subnet with this id exists (see below).
+        """
+        ntconn = self._get_neutron_connection()
+        subnets = ntconn.list_subnets(id=subnet_id)
+        if not subnets['subnets']:
+            logger.error("OpenstackDriver: Get subnet operation failed for subnet_id: %s" %(subnet_id))
+            # Deliberately returns a placeholder instead of raising NotFound
+            # (the raise below was disabled); callers must tolerate a dict
+            # without an 'id' key.
+            #raise NeutronException.NotFound("Could not find subnet_id %s" %(subnet_id))
+            return {'cidr': ''}
+        else:
+            return subnets['subnets'][0]
+
+    def subnet_get(self, subnet_id):
+        """
+        Returns a dictionary object describing the attributes of a subnet.
+
+        Arguments:
+           subnet_id (string): UUID of the subnet
+
+        Returns:
+           A dictionary object of the subnet attributes; when the subnet
+           does not exist, _subnet_get's fallback {'cidr': ''} is returned
+           instead of raising.
+        """
+        return self._subnet_get(subnet_id)
+
+    def subnet_delete(self, subnet_id):
+        """
+        Deletes a subnet identified by subnet_id
+
+        Arguments:
+           subnet_id (string): UUID of the subnet to be deleted
+
+        Returns: None
+        """
+        ntconn = self._get_neutron_connection()
+        assert subnet_id == self._subnet_get(self,subnet_id)
+        try:
+            ntconn.delete_subnet(subnet_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete Subnet operation failed for subnet_id : %s. Exception: %s" %(subnet_id, str(e)))
+            raise
+
+    def port_list(self, **kwargs):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing the port
+
+        Arguments:
+            kwargs (dictionary): A dictionary of filters for the port_list operation
+
+        Returns:
+           A list of port dictionaries
+        """
+        ports  = []
+        ntconn = self._get_neutron_connection()
+
+        # NOTE: this mutates the caller-supplied kwargs dict -- the query is
+        # always scoped to the current tenant.
+        kwargs['tenant_id'] = self.ks_drv.get_tenant_id()
+
+        try:
+            ports  = ntconn.list_ports(**kwargs)
+        except Exception as e:
+            logger.info("OpenstackDriver: List Port operation failed. Exception: %s" %(str(e)))
+            raise
+        return ports['ports']
+
+    def port_create(self, **kwargs):
+        """
+        Create a port in network
+
+        Arguments:
+           A dictionary of following (all keys below are required)
+           {
+              name (string)          : Name of the port
+              network_id(string)     : UUID of the network to which the port belongs
+              subnet_id(string)      : UUID of the subnet from which an IP-address is assigned to the port
+              admin_state_up(Boolean): Administrative state of the port
+              port_type(string)      : Used as binding:vnic_type, e.g. "normal", "direct", "macvtap"
+           }
+        Returns:
+           port_id (string)   : UUID of the port
+        """
+        params = {
+            "port": {
+                "admin_state_up"    : kwargs['admin_state_up'],
+                "name"              : kwargs['name'],
+                "network_id"        : kwargs['network_id'],
+                "fixed_ips"         : [ {"subnet_id": kwargs['subnet_id']}],
+                "binding:vnic_type" : kwargs['port_type']}}
+
+        ntconn = self._get_neutron_connection()
+        try:
+            port  = ntconn.create_port(params)
+        except Exception as e:
+            logger.error("OpenstackDriver: Port Create operation failed. Exception: %s" %(str(e)))
+            raise
+        return port['port']['id']
+
+    def _port_get(self, port_id):
+        """
+        Returns a dictionary object describing the attributes of the port
+
+        Arguments:
+           port_id (string): UUID of the port
+
+        Returns:
+           A dictionary object of the port attributes
+        """
+        ntconn = self._get_neutron_connection()
+        port   = ntconn.list_ports(id=port_id)['ports']
+        if not port:
+            raise NeutronException.NotFound("Could not find port_id %s" %(port_id))
+        return port[0]
+
+    def port_get(self, port_id):
+        """
+        Returns a dictionary object describing the attributes of the port
+
+        Arguments:
+           port_id (string): UUID of the port
+
+        Returns:
+           A dictionary object of the port attributes
+
+        Raises:
+           NeutronException.NotFound when no port with this id exists
+        """
+        return self._port_get(port_id)
+
+    def port_delete(self, port_id):
+        """
+        Deletes a port identified by port_id
+
+        Arguments:
+           port_id (string) : UUID of the port
+
+        Returns: None
+        """
+        assert port_id == self._port_get(port_id)['id']
+        ntconn = self._get_neutron_connection()
+        try:
+            ntconn.delete_port(port_id)
+        except Exception as e:
+            logger.error("Port Delete operation failed for port_id : %s. Exception: %s" %(port_id, str(e)))
+            raise
+
+    def security_group_list(self):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing the security group
+
+        Arguments:
+           None
+
+        Returns:
+           A dictionary of the objects of security group attributes
+        """
+        ntconn = self._get_neutron_connection()
+        try:
+            group_list = ntconn.list_security_groups(tenant_id=self.ks_drv.get_tenant_id())
+        except Exception as e:
+            logger.error("List Security group operation, Exception: %s" %(str(e)))
+            raise
+
+        if 'security_groups' in group_list:
+            return group_list['security_groups']
+        else:
+            return []
+
+    def subnetpool_list(self, **kwargs):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing a subnet prefix pool
+
+        Arguments:
+           None
+
+        Returns:
+           A dictionary of the objects of subnet prefix pool
+        """
+        ntconn = self._get_neutron_connection()
+        try:
+            pool_list = ntconn.list_subnetpools(**kwargs)
+        except Exception as e:
+            logger.error("List SubnetPool operation, Exception: %s" %(str(e)))
+            raise
+
+        if 'subnetpools' in pool_list:
+            return pool_list['subnetpools']
+        else:
+            return []
+        
+class NeutronDriverV2(NeutronDriver):
+    """
+    Driver for openstack neutron neutron-client v2
+    """
+    def __init__(self, ks_drv):
+        """
+        Constructor for NeutronDriver
+        Arguments: KeystoneDriver class object
+        """
+        super(NeutronDriverV2, self).__init__(ks_drv, 'network', '2.0')
+
+
+
+class CeilometerDriver(object):
+    """
+    Driver for openstack ceilometer client
+    """
+
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for CeilometerDriver
+
+        Arguments:
+           ks_drv       : KeystoneDriver instance used for tokens/endpoints
+           service_name : ceilometer service name in the keystone catalog
+           version      : ceilometer API version passed to the client
+        """
+        self.ks_drv = ks_drv
+        self._service_name = service_name
+        self._version = version
+        # Lazily-built client handle; see the `client` property.
+        self._client = None
+
+    @property
+    def version(self):
+        """The ceilometer API version string supplied at construction"""
+        return self._version
+
+    @property
+    def client(self):
+        """The instance of ceilometer client used by the driver"""
+        if self._client is None or not self.ks_drv.is_auth_token_valid():
+            self._client = ceilo_client.Client(**self.credentials)
+
+        return self._client
+
+    @property
+    def auth_token(self):
+        """The authorization token for the ceilometer client
+
+        Raises: KeystoneExceptions.EndpointNotFound (logged, then re-raised)
+        """
+        try:
+            return self.ks_drv.get_auth_token()
+        except KeystoneExceptions.EndpointNotFound as e:
+            logger.error("OpenstackDriver: unable to get authorization token for ceilometer. Exception: %s" %(str(e)))
+            raise
+
+    @property
+    def security_mode(self):
+        """The security mode (insecure flag) for the ceilometer client
+
+        Raises: KeystoneExceptions.EndpointNotFound (logged, then re-raised)
+        """
+        try:
+            return self.ks_drv.get_security_mode()
+        except KeystoneExceptions.EndpointNotFound as e:
+            logger.error("OpenstackDriver: unable to get security mode for ceilometer. Exception: %s" %(str(e)))
+            raise
+
+    @property
+    def endpoint(self):
+        """The public service endpoint for the ceilometer client
+
+        Raises: KeystoneExceptions.EndpointNotFound (logged, then re-raised)
+        """
+        try:
+            return self.ks_drv.get_service_endpoint(self._service_name, "publicURL")
+        except KeystoneExceptions.EndpointNotFound as e:
+            logger.error("OpenstackDriver: unable to get endpoint for ceilometer. Exception: %s" %(str(e)))
+            raise
+
+    @property
+    def credentials(self):
+        """A dictionary of credentials for the ceilometer client"""
+        return dict(
+                version=self.version,
+                endpoint=self.endpoint,
+                token=self.auth_token,
+                insecure=self.security_mode,
+                )
+
+    @property
+    def meters(self):
+        """A list of the available meters (fetched from ceilometer on every access)"""
+        try:
+            return self.client.meters.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: List meters operation failed. Exception: %s" %(str(e)))
+            raise
+
+    @property
+    def alarms(self):
+        """The ceilometer client's alarms manager"""
+        return self.client.alarms
+
+    def query_samples(self, vim_instance_id, counter_name, limit=1):
+        """Returns a list of samples
+
+        Arguments:
+            vim_instance_id - the ID of the VIM that the samples are from
+            counter_name    - the counter that the samples will come from
+            limit           - a limit on the number of samples to return
+                              (default: 1)
+
+        Returns:
+            A list of samples
+
+        """
+        try:
+            filter = json.dumps({
+                "and": [
+                    {"=": {"resource": vim_instance_id}},
+                    {"=": {"counter_name": counter_name}}
+                    ]
+                })
+            result = self.client.query_samples.query(filter=filter, limit=limit)
+            return result[-limit:]
+
+        except Exception as e:
+            logger.exception(e)
+
+        return []
+
+
+class CeilometerDriverV2(CeilometerDriver):
+    """
+    Driver for openstack ceilometer ceilometer-client
+    """
+    def __init__(self, ks_drv):
+        """
+        Constructor for CeilometerDriverV2
+        Arguments: KeystoneDriver class object
+        """
+        super(CeilometerDriverV2, self).__init__(ks_drv, 'metering', '2')
+
+class OpenstackDriver(object):
+    """
+    Driver for openstack nova, neutron, glance, keystone, swift, cinder services
+    """
+    def __init__(self, username, password, auth_url, tenant_name, mgmt_network = None, cert_validate = False):
+        """
+        OpenstackDriver Driver constructor
+        Arguments:
+          username (string)                   : Username for project/tenant.
+          password (string)                   : Password
+          auth_url (string)                   : Keystone Authentication URL.
+          tenant_name (string)                : Openstack project name
+          mgmt_network(string, optional)      : Management network name. Each VM created with this cloud-account will
+                                                have a default interface into management network.
+          cert_validate (boolean, optional)   : In case of SSL/TLS connection if certificate validation is required or not.
+
+        Raises:
+          NotImplementedError        : auth_url names neither a /v2 nor a /v3 keystone endpoint
+          NeutronException.NotFound  : mgmt_network was given but no network with that name exists
+        """
+        insecure = not cert_validate
+        # Pick the keystone generation from the auth URL and choose
+        # matching service drivers.
+        if auth_url.find('/v3') != -1:
+            self.ks_drv        = KeystoneDriverV3(username, password, auth_url, tenant_name, insecure)
+            self.glance_drv    = GlanceDriverV2(self.ks_drv)
+            self.nova_drv      = NovaDriverV21(self.ks_drv)
+            self.neutron_drv   = NeutronDriverV2(self.ks_drv)
+            self.ceilo_drv     = CeilometerDriverV2(self.ks_drv)
+        elif auth_url.find('/v2') != -1:
+            self.ks_drv        = KeystoneDriverV2(username, password, auth_url, tenant_name, insecure)
+            self.glance_drv    = GlanceDriverV2(self.ks_drv)
+            self.nova_drv      = NovaDriverV2(self.ks_drv)
+            self.neutron_drv   = NeutronDriverV2(self.ks_drv)
+            self.ceilo_drv     = CeilometerDriverV2(self.ks_drv)
+        else:
+            logger.error("Could not identity the version information for openstack service endpoints. Auth_URL should contain \"/v2\" or \"/v3\" string in it")
+            raise NotImplementedError("Auth URL is wrong or invalid. Only Keystone v2 & v3 supported")
+
+        # Resolve the management network name to its UUID up front so later
+        # calls can use get_mgmt_network_id() without querying neutron.
+        if mgmt_network != None:
+            self._mgmt_network = mgmt_network
+
+            networks = []
+            try:
+                ntconn   = self.neutron_drv._get_neutron_connection()
+                networks = ntconn.list_networks()
+            except Exception as e:
+                logger.error("OpenstackDriver: List Network operation failed. Exception: %s" %(str(e)))
+                raise
+
+            network_list = [ network for network in networks['networks'] if network['name'] == mgmt_network ]
+
+            if not network_list:
+                raise NeutronException.NotFound("Could not find network %s" %(mgmt_network))
+            self._mgmt_network_id = network_list[0]['id']
+
+    def validate_account_creds(self):
+        try:
+            ksconn = self.ks_drv._get_keystone_connection()
+        except KeystoneExceptions.AuthorizationFailure as e:
+            logger.error("OpenstackDriver: Unable to authenticate or validate the existing credentials. Exception: %s" %(str(e)))
+            raise ValidationError("Invalid Credentials: "+ str(e))
+        except Exception as e:
+            logger.error("OpenstackDriver: Could not connect to Openstack. Exception: %s" %(str(e)))
+            raise ValidationError("Connection Error: "+ str(e))
+
+    def get_mgmt_network_id(self):
+        # UUID of the management network resolved in __init__; only set when
+        # a mgmt_network name was passed to the constructor.
+        return self._mgmt_network_id
+
+    def glance_image_create(self, **kwargs):
+        if not 'disk_format' in kwargs:
+            kwargs['disk_format'] = 'qcow2'
+        if not 'container_format' in kwargs:
+            kwargs['container_format'] = 'bare'
+        if not 'min_disk' in kwargs:
+            kwargs['min_disk'] = 0
+        if not 'min_ram' in kwargs:
+            kwargs['min_ram'] = 0
+        return self.glance_drv.image_create(**kwargs)
+
+    def glance_image_upload(self, image_id, fd):
+        # Upload image bytes from file object fd to the glance image.
+        self.glance_drv.image_upload(image_id, fd)
+
+    def glance_image_add_location(self, image_id, location):
+        self.glance_drv.image_add_location(image_id, location)
+
+    def glance_image_delete(self, image_id):
+        # Delete a glance image by UUID.
+        self.glance_drv.image_delete(image_id)
+
+    def glance_image_list(self):
+        # List all glance images visible to this tenant.
+        return self.glance_drv.image_list()
+
+    def glance_image_get(self, image_id):
+        # Fetch one glance image's attributes by UUID.
+        return self.glance_drv.image_get(image_id)
+
+
+    def nova_flavor_list(self):
+        # List all nova flavors visible to this tenant.
+        return self.nova_drv.flavor_list()
+
+    def nova_flavor_create(self, name, ram, vcpus, disk, epa_specs):
+        # epa_specs may be None/empty; normalize to {} before the call.
+        extra_specs = epa_specs if epa_specs else {}
+        # Note the key rename: this wrapper takes 'vcpus' but NovaDriver's
+        # flavor_create expects 'vcpu'.
+        return self.nova_drv.flavor_create(name,
+                                           ram         = ram,
+                                           vcpu        = vcpus,
+                                           disk        = disk,
+                                           extra_specs = extra_specs)
+
+    def nova_flavor_delete(self, flavor_id):
+        # Delete a flavor by id.
+        self.nova_drv.flavor_delete(flavor_id)
+
+    def nova_flavor_get(self, flavor_id):
+        # Fetch a single flavor's attributes by id.
+        return self.nova_drv.flavor_get(flavor_id)
+
+    def nova_server_create(self, **kwargs):
+        # Fail fast before creating the server: the flavor must exist and
+        # the image must be in 'active' state.
+        assert kwargs['flavor_id'] == self.nova_drv.flavor_get(kwargs['flavor_id'])['id']
+        image = self.glance_drv.image_get(kwargs['image_id'])
+        if image['status'] != 'active':
+            raise GlanceException.NotFound("Image with image_id: %s not found in active state. Current State: %s" %(image['id'], image['status']))
+
+        # if 'network_list' in kwargs:
+        #     kwargs['network_list'].append(self._mgmt_network_id)
+        # else:
+        #     kwargs['network_list'] = [self._mgmt_network_id]
+
+        # When the caller picks no security groups, default to every group
+        # visible on the account (or None when there are none).
+        if 'security_groups' not in kwargs:
+            nvconn = self.nova_drv._get_nova_connection()
+            sec_groups = nvconn.security_groups.list()
+            if sec_groups:
+                ## Should we add VM in all availability security_groups ???
+                kwargs['security_groups'] = [x.name for x in sec_groups]
+            else:
+                kwargs['security_groups'] = None
+
+        return self.nova_drv.server_create(**kwargs)
+
+    # --- Thin pass-throughs to the underlying NovaDriver instance ---
+
+    def nova_server_add_port(self, server_id, port_id):
+        self.nova_drv.server_add_port(server_id, port_id)
+
+    def nova_server_delete_port(self, server_id, port_id):
+        self.nova_drv.server_delete_port(server_id, port_id)
+
+    def nova_server_start(self, server_id):
+        self.nova_drv.server_start(server_id)
+
+    def nova_server_stop(self, server_id):
+        self.nova_drv.server_stop(server_id)
+
+    def nova_server_delete(self, server_id):
+        self.nova_drv.server_delete(server_id)
+
+    def nova_server_reboot(self, server_id):
+        # Always requests a HARD reboot (reboot_type is not configurable here).
+        self.nova_drv.server_reboot(server_id, reboot_type='HARD')
+
+    def nova_server_rebuild(self, server_id, image_id):
+        self.nova_drv.server_rebuild(server_id, image_id)
+
+    def nova_floating_ip_list(self):
+        return self.nova_drv.floating_ip_list()
+
+    def nova_floating_ip_create(self, pool = None):
+        return self.nova_drv.floating_ip_create(pool)
+
+    def nova_floating_ip_delete(self, floating_ip):
+        self.nova_drv.floating_ip_delete(floating_ip)
+
+    def nova_floating_ip_assign(self, server_id, floating_ip, fixed_ip):
+        self.nova_drv.floating_ip_assign(server_id, floating_ip, fixed_ip)
+
+    def nova_floating_ip_release(self, server_id, floating_ip):
+        self.nova_drv.floating_ip_release(server_id, floating_ip)
+
+    def nova_server_list(self):
+        return self.nova_drv.server_list()
+
+    def nova_server_get(self, server_id):
+        return self.nova_drv.server_get(server_id)
+
+    def nova_server_console(self, server_id):
+        return self.nova_drv.server_console(server_id)
+
+    def nova_server_group_list(self):
+        return self.nova_drv.group_list()
+
+    def neutron_network_list(self):
+        """Return the list of Neutron networks visible to this tenant."""
+        return self.neutron_drv.network_list()
+
+    def neutron_network_get(self, network_id):
+        """Return details for the network identified by network_id."""
+        return self.neutron_drv.network_get(network_id)
+
+    def neutron_network_create(self, **kwargs):
+        """Create a network; kwargs are passed through to the driver."""
+        return self.neutron_drv.network_create(**kwargs)
+
+    def neutron_network_delete(self, network_id):
+        """Delete the network identified by network_id."""
+        self.neutron_drv.network_delete(network_id)
+
+    def neutron_subnet_list(self):
+        """Return the list of subnets visible to this tenant."""
+        return self.neutron_drv.subnet_list()
+
+    def neutron_subnet_get(self, subnet_id):
+        """Return details for the subnet identified by subnet_id."""
+        return self.neutron_drv.subnet_get(subnet_id)
+
+    def neutron_subnet_create(self, **kwargs):
+        """Create a subnet; kwargs are passed through to the driver."""
+        return self.neutron_drv.subnet_create(**kwargs)
+
+    # NOTE(review): method name misspells 'neutron' as 'netruon'. Callers
+    # depend on this exact name, so a rename needs a backward-compat alias.
+    def netruon_subnet_delete(self, subnet_id):
+        """Delete the subnet identified by subnet_id."""
+        self.neutron_drv.subnet_delete(subnet_id)
+
+    def neutron_subnetpool_list(self):
+        """Return the list of subnet pools."""
+        return self.neutron_drv.subnetpool_list()
+
+    # NOTE(review): same 'netruon' misspelling as netruon_subnet_delete above.
+    def netruon_subnetpool_by_name(self, pool_name):
+        """Return the first subnet pool whose name is pool_name, or None."""
+        pool_list = self.neutron_drv.subnetpool_list(**{'name': pool_name})
+        if pool_list:
+            return pool_list[0]
+        else:
+            return None
+        
+    def neutron_port_list(self, **kwargs):
+        """Return the list of ports; kwargs are driver filter arguments."""
+        return self.neutron_drv.port_list(**kwargs)
+
+    def neutron_port_get(self, port_id):
+        """Return details for the port identified by port_id."""
+        return self.neutron_drv.port_get(port_id)
+
+    def neutron_port_create(self, **kwargs):
+        """Create a port on the network given by kwargs['network_id'].
+
+        The network's single subnet is looked up and injected as
+        kwargs['subnet_id']; 'admin_state_up' defaults to True when absent.
+        If 'vm_id' is supplied, the new port is also attached to that server.
+
+        Returns:
+            the new port id
+        """
+        subnets = [subnet for subnet in self.neutron_drv.subnet_list() if subnet['network_id'] == kwargs['network_id']]
+        # NOTE(review): requires exactly one subnet on the network, and
+        # assert statements are stripped under 'python -O' -- a multi-subnet
+        # network would then silently use subnets[0].
+        assert len(subnets) == 1
+        kwargs['subnet_id'] = subnets[0]['id']
+        if not 'admin_state_up' in kwargs:
+            kwargs['admin_state_up'] = True
+        port_id =  self.neutron_drv.port_create(**kwargs)
+
+        # Optionally attach the freshly created port to a VM.
+        if 'vm_id' in kwargs:
+            self.nova_server_add_port(kwargs['vm_id'], port_id)
+        return port_id
+
+    def neutron_security_group_list(self):
+        """Return the list of Neutron security groups."""
+        return self.neutron_drv.security_group_list()
+
+    def neutron_security_group_by_name(self, group_name):
+        """Return the first security group named group_name, or None."""
+        group_list = self.neutron_drv.security_group_list()
+        groups = [group for group in group_list if group['name'] == group_name]
+        if groups:
+            return groups[0]
+        else:
+            return None
+
+    def neutron_port_delete(self, port_id):
+        """Delete the port identified by port_id."""
+        self.neutron_drv.port_delete(port_id)
+
+    def ceilo_meter_endpoint(self):
+        """Return the Ceilometer service endpoint URL."""
+        return self.ceilo_drv.endpoint
+
+    def ceilo_meter_list(self):
+        """Return the list of meters known to Ceilometer."""
+        return self.ceilo_drv.meters
+
+    def ceilo_nfvi_metrics(self, vim_id):
+        """Returns a dict of NFVI metrics for a given VM
+
+        Arguments:
+            vim_id - the VIM ID of the VM to retrieve the metrics for
+
+        Returns:
+            A dict of NFVI metrics
+
+        """
+        def query_latest_sample(counter_name):
+            # Return the newest sample of counter_name for this VM, or None
+            # when no sample exists or the query fails.
+            # NOTE(review): relies on module-level 'json' and 'logger'
+            # bindings that are not visible in this chunk -- confirm both
+            # are imported/defined at module scope.
+            try:
+                filter = json.dumps({
+                    "and": [
+                        {"=": {"resource": vim_id}},
+                        {"=": {"counter_name": counter_name}}
+                        ]
+                    })
+                orderby = json.dumps([{"timestamp": "DESC"}])
+                result = self.ceilo_drv.client.query_samples.query(
+                        filter=filter,
+                        orderby=orderby,
+                        limit=1,
+                        )
+                return result[0]
+
+            except IndexError:
+                # No samples returned for this counter; treat as missing.
+                pass
+
+            except Exception as e:
+                logger.error("Got exception while querying ceilometer, exception details:%s " %str(e))
+
+            return None
+
+        memory_usage = query_latest_sample("memory.usage")
+        disk_usage = query_latest_sample("disk.usage")
+        cpu_util = query_latest_sample("cpu_util")
+
+        metrics = dict()
+
+        if memory_usage is not None:
+            # Scale by 1e6 -- presumably converting MB to bytes; confirm
+            # the units Ceilometer reports for memory.usage.
+            memory_usage.volume = 1e6 * memory_usage.volume
+            metrics["memory_usage"] = memory_usage.to_dict()
+
+        if disk_usage is not None:
+            metrics["disk_usage"] = disk_usage.to_dict()
+
+        if cpu_util is not None:
+            metrics["cpu_util"] = cpu_util.to_dict()
+
+        return metrics
+
+    def ceilo_alarm_list(self):
+        """Returns a list of ceilometer alarms"""
+        return self.ceilo_drv.client.alarms.list()
+
+    def ceilo_alarm_create(self,
+                           name,
+                           meter,
+                           statistic,
+                           operation,
+                           threshold,
+                           period,
+                           evaluations,
+                           severity='low',
+                           repeat=True,
+                           enabled=True,
+                           actions=None,
+                           **kwargs):
+        """Create a new Alarm
+
+        Arguments:
+            name        - the name of the alarm
+            meter       - the name of the meter to measure
+            statistic   - the type of statistic used to trigger the alarm
+                          ('avg', 'min', 'max', 'count', 'sum')
+            operation   - the relational operator that, combined with the
+                          threshold value, determines  when the alarm is
+                          triggered ('lt', 'le', 'eq', 'ge', 'gt')
+            threshold   - the value of the statistic that will trigger the
+                          alarm
+            period      - the duration (seconds) over which to evaluate the
+                          specified statistic
+            evaluations - the number of samples of the meter statistic to
+                          collect when evaluating the threshold
+            severity    - a measure of the urgency or importance of the alarm
+                          ('low', 'moderate', 'critical')
+            repeat      - a flag that indicates whether the alarm should be
+                          triggered once (False) or repeatedly while the alarm
+                          condition is true (True)
+            enabled     - a flag that indicates whether the alarm is enabled
+                          (True) or disabled (False)
+            actions     - a dict specifying the URLs for webhooks. The dict can
+                          have up to 3 keys: 'insufficient_data', 'alarm',
+                          'ok'. Each key is associated with a list of URLs to
+                          webhooks that will be invoked when one of the 3
+                          actions is taken.
+            kwargs      - an arbitrary dict of keyword arguments that are
+                          passed to the ceilometer client
+
+        Returns:
+            whatever the ceilometer client returns for alarms.create
+            (presumably the created alarm object -- confirm against the
+            client library)
+        """
+        # Split the optional 'actions' dict into the three per-state webhook
+        # lists the ceilometer client expects; each is None when unset.
+        ok_actions = actions.get('ok') if actions is not None else None
+        alarm_actions = actions.get('alarm') if actions is not None else None
+        insufficient_data_actions = actions.get('insufficient_data') if actions is not None else None
+
+        return self.ceilo_drv.client.alarms.create(
+                name=name,
+                meter_name=meter,
+                statistic=statistic,
+                comparison_operator=operation,
+                threshold=threshold,
+                period=period,
+                evaluation_periods=evaluations,
+                severity=severity,
+                repeat_actions=repeat,
+                enabled=enabled,
+                ok_actions=ok_actions,
+                alarm_actions=alarm_actions,
+                insufficient_data_actions=insufficient_data_actions,
+                **kwargs
+                )
+
+    def ceilo_alarm_update(self, alarm_id, **kwargs):
+        """Updates an existing alarm
+
+        Arguments:
+            alarm_id - the identifier of the alarm to update
+            kwargs   - a dict of the alarm attributes to update
+
+        """
+        return self.ceilo_drv.client.alarms.update(alarm_id, **kwargs)
+
+    def ceilo_alarm_delete(self, alarm_id):
+        """Delete the alarm identified by alarm_id (no return value)."""
+        self.ceilo_drv.client.alarms.delete(alarm_id)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_utils.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_utils.py
new file mode 100644
index 0000000..eda3ccb
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_utils.py
@@ -0,0 +1,552 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import re
+
+class OpenstackGuestEPAUtils(object):
+    """
+    Utility class for Guest EPA to Openstack flavor extra_specs conversion routines
+    """
+    def __init__(self):
+        # MANO CPU-pinning policy enum -> nova extra_spec value.
+        self._mano_to_espec_cpu_pinning_policy = {
+            'DEDICATED' : 'dedicated',
+            'SHARED'    : 'shared',
+            'ANY'       : 'any',
+        }
+
+        # Reverse of the above.
+        self._espec_to_mano_cpu_pinning_policy = {
+            'dedicated' : 'DEDICATED',
+            'shared'    : 'SHARED',
+            'any'       : 'ANY',
+        }
+
+        # MANO mempage-size enum -> extra_spec value (sizes are in KiB).
+        self._mano_to_espec_mempage_size = {
+            'LARGE'        : 'large', 
+            'SMALL'        : 'small',
+            'SIZE_2MB'     :  2048,
+            'SIZE_1GB'     :  1048576,
+            'PREFER_LARGE' : 'large',
+        }
+
+        # BUG(review): duplicate key 'large' below -- Python keeps the LAST
+        # entry, so 'large' always reverse-maps to 'PREFER_LARGE' and the
+        # 'LARGE' value is unreachable. Decide which mapping is intended
+        # and drop the other entry.
+        self._espec_to_mano_mempage_size = {
+            'large'        : 'LARGE', 
+            'small'        : 'SMALL',
+             2048          : 'SIZE_2MB',
+             1048576       : 'SIZE_1GB',
+            'large'        : 'PREFER_LARGE',
+        }
+
+        # MANO CPU-thread-pinning policy enum -> extra_spec value.
+        self._mano_to_espec_cpu_thread_pinning_policy = {
+            'AVOID'    : 'avoid',
+            'SEPARATE' : 'separate',
+            'ISOLATE'  : 'isolate',
+            'PREFER'   : 'prefer',
+        }
+
+        # Reverse of the above.
+        self._espec_to_mano_cpu_thread_pinning_policy = {
+            'avoid'    : 'AVOID',
+            'separate' : 'SEPARATE',
+            'isolate'  : 'ISOLATE',
+            'prefer'   : 'PREFER',
+        }
+
+        # extra_spec NUMA memory policy -> MANO enum.
+        self._espec_to_mano_numa_memory_policy = {
+            'strict'   : 'STRICT',
+            'preferred': 'PREFERRED'
+        }
+
+        # MANO NUMA memory policy enum -> extra_spec value.
+        self._mano_to_espec_numa_memory_policy = {
+            'STRICT'   : 'strict',
+            'PREFERRED': 'preferred'
+        }
+
+    def mano_to_extra_spec_cpu_pinning_policy(self, cpu_pinning_policy):
+        """Map a MANO CPU-pinning enum to its extra_spec value, or None."""
+        if cpu_pinning_policy in self._mano_to_espec_cpu_pinning_policy:
+            return self._mano_to_espec_cpu_pinning_policy[cpu_pinning_policy]
+        else:
+            return None
+
+    def extra_spec_to_mano_cpu_pinning_policy(self, cpu_pinning_policy):
+        """Map an extra_spec CPU-pinning value to its MANO enum, or None."""
+        if cpu_pinning_policy in self._espec_to_mano_cpu_pinning_policy:
+            return self._espec_to_mano_cpu_pinning_policy[cpu_pinning_policy]
+        else:
+            return None
+
+    def mano_to_extra_spec_mempage_size(self, mempage_size):
+        """Map a MANO mempage-size enum to its extra_spec value, or None."""
+        if mempage_size in self._mano_to_espec_mempage_size:
+            return self._mano_to_espec_mempage_size[mempage_size]
+        else:
+            return None
+
+    def extra_spec_to_mano_mempage_size(self, mempage_size):
+        """Map an extra_spec mempage-size value to its MANO enum, or None."""
+        if mempage_size in self._espec_to_mano_mempage_size:
+            return self._espec_to_mano_mempage_size[mempage_size]
+        else:
+            return None
+
+    def mano_to_extra_spec_cpu_thread_pinning_policy(self, cpu_thread_pinning_policy):
+        """Map a MANO thread-pinning enum to its extra_spec value, or None."""
+        if cpu_thread_pinning_policy in self._mano_to_espec_cpu_thread_pinning_policy:
+            return self._mano_to_espec_cpu_thread_pinning_policy[cpu_thread_pinning_policy]
+        else:
+            return None
+
+    def extra_spec_to_mano_cpu_thread_pinning_policy(self, cpu_thread_pinning_policy):
+        """Map an extra_spec thread-pinning value to its MANO enum, or None."""
+        if cpu_thread_pinning_policy in self._espec_to_mano_cpu_thread_pinning_policy:
+            return self._espec_to_mano_cpu_thread_pinning_policy[cpu_thread_pinning_policy]
+        else:
+            return None
+
+    def mano_to_extra_spec_trusted_execution(self, trusted_execution):
+        """Map a boolean trusted-execution flag to 'trusted'/'untrusted'."""
+        if trusted_execution:
+            return 'trusted'
+        else:
+            return 'untrusted'
+
+    def extra_spec_to_mano_trusted_execution(self, trusted_execution):
+        """Map 'trusted'/'untrusted' back to True/False; None otherwise."""
+        if trusted_execution == 'trusted':
+            return True
+        elif trusted_execution == 'untrusted':
+            return False
+        else:
+            return None
+
+    def mano_to_extra_spec_numa_node_count(self, numa_node_count):
+        """Pass-through: NUMA node count is used as-is in extra_specs."""
+        return numa_node_count
+
+    def extra_specs_to_mano_numa_node_count(self, numa_node_count):
+        """Convert the extra_spec NUMA node count (string) to int."""
+        return int(numa_node_count)
+
+    def mano_to_extra_spec_numa_memory_policy(self, numa_memory_policy):
+        """Map a MANO NUMA memory policy enum to its extra_spec value, or None."""
+        if numa_memory_policy in self._mano_to_espec_numa_memory_policy:
+            return self._mano_to_espec_numa_memory_policy[numa_memory_policy]
+        else:
+            return None
+
+    def extra_to_mano_spec_numa_memory_policy(self, numa_memory_policy):
+        """Map an extra_spec NUMA memory policy to its MANO enum, or None."""
+        if numa_memory_policy in self._espec_to_mano_numa_memory_policy:
+            return self._espec_to_mano_numa_memory_policy[numa_memory_policy]
+        else:
+            return None
+        
+                                                          
+    
+    
+class OpenstackHostEPAUtils(object):
+    """
+    Utility class for Host EPA to Openstack flavor extra_specs conversion routines
+    """
+    def __init__(self):
+        # MANO CPU-model enum (PREFER_*/REQUIRE_*) -> nova cpu_info model.
+        self._mano_to_espec_cpumodel = {
+            "PREFER_WESTMERE"     : "Westmere",
+            "REQUIRE_WESTMERE"    : "Westmere",
+            "PREFER_SANDYBRIDGE"  : "SandyBridge",
+            "REQUIRE_SANDYBRIDGE" : "SandyBridge",
+            "PREFER_IVYBRIDGE"    : "IvyBridge",
+            "REQUIRE_IVYBRIDGE"   : "IvyBridge",
+            "PREFER_HASWELL"      : "Haswell",
+            "REQUIRE_HASWELL"     : "Haswell",
+            "PREFER_BROADWELL"    : "Broadwell",
+            "REQUIRE_BROADWELL"   : "Broadwell",
+            "PREFER_NEHALEM"      : "Nehalem",
+            "REQUIRE_NEHALEM"     : "Nehalem",
+            "PREFER_PENRYN"       : "Penryn",
+            "REQUIRE_PENRYN"      : "Penryn",
+            "PREFER_CONROE"       : "Conroe",
+            "REQUIRE_CONROE"      : "Conroe",
+            "PREFER_CORE2DUO"     : "Core2Duo",
+            "REQUIRE_CORE2DUO"    : "Core2Duo",
+        }
+
+        # Reverse map; PREFER_* is not recoverable, so everything maps back
+        # to the REQUIRE_* variant.
+        self._espec_to_mano_cpumodel = {
+            "Westmere"     : "REQUIRE_WESTMERE",
+            "SandyBridge"  : "REQUIRE_SANDYBRIDGE",
+            "IvyBridge"    : "REQUIRE_IVYBRIDGE",
+            "Haswell"      : "REQUIRE_HASWELL",
+            "Broadwell"    : "REQUIRE_BROADWELL",
+            "Nehalem"      : "REQUIRE_NEHALEM",
+            "Penryn"       : "REQUIRE_PENRYN",
+            "Conroe"       : "REQUIRE_CONROE",
+            "Core2Duo"     : "REQUIRE_CORE2DUO",
+        }
+
+        # MANO CPU-architecture enum -> nova cpu_info arch.
+        # NOTE(review): ARMV7 maps forward to "ARMv7", but the reverse map
+        # below only knows "ARMv7-A" -- the round-trip for ARMV7 is
+        # asymmetric; confirm which spelling the hypervisor reports.
+        self._mano_to_espec_cpuarch = {
+            "PREFER_X86"     : "x86",
+            "REQUIRE_X86"    : "x86",
+            "PREFER_X86_64"  : "x86_64",
+            "REQUIRE_X86_64" : "x86_64",
+            "PREFER_I686"    : "i686",
+            "REQUIRE_I686"   : "i686",
+            "PREFER_IA64"    : "ia64",
+            "REQUIRE_IA64"   : "ia64",
+            "PREFER_ARMV7"   : "ARMv7",
+            "REQUIRE_ARMV7"  : "ARMv7",
+            "PREFER_ARMV8"   : "ARMv8-A",
+            "REQUIRE_ARMV8"  : "ARMv8-A",
+        }
+
+        # Reverse map; collapses to the REQUIRE_* variant.
+        self._espec_to_mano_cpuarch = {
+            "x86"     : "REQUIRE_X86",
+            "x86_64"  : "REQUIRE_X86_64",
+            "i686"    : "REQUIRE_I686",
+            "ia64"    : "REQUIRE_IA64",
+            "ARMv7-A" : "REQUIRE_ARMV7",
+            "ARMv8-A" : "REQUIRE_ARMV8",
+        }
+
+        # MANO CPU-vendor enum -> nova cpu_info vendor.
+        self._mano_to_espec_cpuvendor = {
+            "PREFER_INTEL"  : "Intel",
+            "REQUIRE_INTEL" : "Intel",
+            "PREFER_AMD"    : "AMD",
+            "REQUIRE_AMD"   : "AMD",
+        }
+
+        # Reverse map; collapses to the REQUIRE_* variant.
+        self._espec_to_mano_cpuvendor = {
+            "Intel" : "REQUIRE_INTEL",
+            "AMD"   : "REQUIRE_AMD",
+        }
+
+        # MANO CPU-feature enum (PREFER_*/REQUIRE_*) -> cpu_info feature flag.
+        self._mano_to_espec_cpufeatures = {
+            "PREFER_AES"       : "aes",
+            "REQUIRE_AES"      : "aes",
+            "REQUIRE_VME"      : "vme",
+            "PREFER_VME"       : "vme",
+            "REQUIRE_DE"       : "de",
+            "PREFER_DE"        : "de",
+            "REQUIRE_PSE"      : "pse",
+            "PREFER_PSE"       : "pse",
+            "REQUIRE_TSC"      : "tsc",
+            "PREFER_TSC"       : "tsc",
+            "REQUIRE_MSR"      : "msr",
+            "PREFER_MSR"       : "msr",
+            "REQUIRE_PAE"      : "pae",
+            "PREFER_PAE"       : "pae",
+            "REQUIRE_MCE"      : "mce",
+            "PREFER_MCE"       : "mce",
+            "REQUIRE_CX8"      : "cx8",
+            "PREFER_CX8"       : "cx8",
+            "REQUIRE_APIC"     : "apic",
+            "PREFER_APIC"      : "apic",
+            "REQUIRE_SEP"      : "sep",
+            "PREFER_SEP"       : "sep",
+            "REQUIRE_MTRR"     : "mtrr",
+            "PREFER_MTRR"      : "mtrr",
+            "REQUIRE_PGE"      : "pge",
+            "PREFER_PGE"       : "pge",
+            "REQUIRE_MCA"      : "mca",
+            "PREFER_MCA"       : "mca",
+            "REQUIRE_CMOV"     : "cmov",
+            "PREFER_CMOV"      : "cmov",
+            "REQUIRE_PAT"      : "pat",
+            "PREFER_PAT"       : "pat",
+            "REQUIRE_PSE36"    : "pse36",
+            "PREFER_PSE36"     : "pse36",
+            "REQUIRE_CLFLUSH"  : "clflush",
+            "PREFER_CLFLUSH"   : "clflush",
+            "REQUIRE_DTS"      : "dts",
+            "PREFER_DTS"       : "dts",
+            "REQUIRE_ACPI"     : "acpi",
+            "PREFER_ACPI"      : "acpi",
+            "REQUIRE_MMX"      : "mmx",
+            "PREFER_MMX"       : "mmx",
+            "REQUIRE_FXSR"     : "fxsr",
+            "PREFER_FXSR"      : "fxsr",
+            "REQUIRE_SSE"      : "sse",
+            "PREFER_SSE"       : "sse",
+            "REQUIRE_SSE2"     : "sse2",
+            "PREFER_SSE2"      : "sse2",
+            "REQUIRE_SS"       : "ss",
+            "PREFER_SS"        : "ss",
+            "REQUIRE_HT"       : "ht",
+            "PREFER_HT"        : "ht",
+            "REQUIRE_TM"       : "tm",
+            "PREFER_TM"        : "tm",
+            "REQUIRE_IA64"     : "ia64",
+            "PREFER_IA64"      : "ia64",
+            "REQUIRE_PBE"      : "pbe",
+            "PREFER_PBE"       : "pbe",
+            "REQUIRE_RDTSCP"   : "rdtscp",
+            "PREFER_RDTSCP"    : "rdtscp",
+            "REQUIRE_PNI"      : "pni",
+            "PREFER_PNI"       : "pni",
+            "REQUIRE_PCLMULQDQ": "pclmulqdq",
+            "PREFER_PCLMULQDQ" : "pclmulqdq",
+            "REQUIRE_DTES64"   : "dtes64",
+            "PREFER_DTES64"    : "dtes64",
+            "REQUIRE_MONITOR"  : "monitor",
+            "PREFER_MONITOR"   : "monitor",
+            "REQUIRE_DS_CPL"   : "ds_cpl",
+            "PREFER_DS_CPL"    : "ds_cpl",
+            "REQUIRE_VMX"      : "vmx",
+            "PREFER_VMX"       : "vmx",
+            "REQUIRE_SMX"      : "smx",
+            "PREFER_SMX"       : "smx",
+            "REQUIRE_EST"      : "est",
+            "PREFER_EST"       : "est",
+            "REQUIRE_TM2"      : "tm2",
+            "PREFER_TM2"       : "tm2",
+            "REQUIRE_SSSE3"    : "ssse3",
+            "PREFER_SSSE3"     : "ssse3",
+            "REQUIRE_CID"      : "cid",
+            "PREFER_CID"       : "cid",
+            "REQUIRE_FMA"      : "fma",
+            "PREFER_FMA"       : "fma",
+            "REQUIRE_CX16"     : "cx16",
+            "PREFER_CX16"      : "cx16",
+            "REQUIRE_XTPR"     : "xtpr",
+            "PREFER_XTPR"      : "xtpr",
+            "REQUIRE_PDCM"     : "pdcm",
+            "PREFER_PDCM"      : "pdcm",
+            "REQUIRE_PCID"     : "pcid",
+            "PREFER_PCID"      : "pcid",
+            "REQUIRE_DCA"      : "dca",
+            "PREFER_DCA"       : "dca",
+            "REQUIRE_SSE4_1"   : "sse4_1",
+            "PREFER_SSE4_1"    : "sse4_1",
+            "REQUIRE_SSE4_2"   : "sse4_2",
+            "PREFER_SSE4_2"    : "sse4_2",
+            "REQUIRE_X2APIC"   : "x2apic",
+            "PREFER_X2APIC"    : "x2apic",
+            "REQUIRE_MOVBE"    : "movbe",
+            "PREFER_MOVBE"     : "movbe",
+            "REQUIRE_POPCNT"   : "popcnt",
+            "PREFER_POPCNT"    : "popcnt",
+            "REQUIRE_TSC_DEADLINE_TIMER"   : "tsc_deadline_timer",
+            "PREFER_TSC_DEADLINE_TIMER"    : "tsc_deadline_timer",
+            "REQUIRE_XSAVE"    : "xsave",
+            "PREFER_XSAVE"     : "xsave",
+            "REQUIRE_AVX"      : "avx",
+            "PREFER_AVX"       : "avx",
+            "REQUIRE_F16C"     : "f16c",
+            "PREFER_F16C"      : "f16c",
+            "REQUIRE_RDRAND"   : "rdrand",
+            "PREFER_RDRAND"    : "rdrand",
+            "REQUIRE_FSGSBASE" : "fsgsbase",
+            "PREFER_FSGSBASE"  : "fsgsbase",
+            "REQUIRE_BMI1"     : "bmi1",
+            "PREFER_BMI1"      : "bmi1",
+            "REQUIRE_HLE"      : "hle",
+            "PREFER_HLE"       : "hle",
+            "REQUIRE_AVX2"     : "avx2",
+            "PREFER_AVX2"      : "avx2",
+            "REQUIRE_SMEP"     : "smep",
+            "PREFER_SMEP"      : "smep",
+            "REQUIRE_BMI2"     : "bmi2",
+            "PREFER_BMI2"      : "bmi2",
+            "REQUIRE_ERMS"     : "erms",
+            "PREFER_ERMS"      : "erms",
+            "REQUIRE_INVPCID"  : "invpcid",
+            "PREFER_INVPCID"   : "invpcid",
+            "REQUIRE_RTM"      : "rtm",
+            "PREFER_RTM"       : "rtm",
+            "REQUIRE_MPX"      : "mpx",
+            "PREFER_MPX"       : "mpx",
+            "REQUIRE_RDSEED"   : "rdseed",
+            "PREFER_RDSEED"    : "rdseed",
+            "REQUIRE_ADX"      : "adx",
+            "PREFER_ADX"       : "adx",
+            "REQUIRE_SMAP"     : "smap",
+            "PREFER_SMAP"      : "smap",
+        }
+
+        # Reverse feature map; collapses to the REQUIRE_* variant.
+        self._espec_to_mano_cpufeatures = {
+            "aes"      : "REQUIRE_AES",
+            "vme"      : "REQUIRE_VME",
+            "de"       : "REQUIRE_DE",
+            "pse"      : "REQUIRE_PSE",
+            "tsc"      : "REQUIRE_TSC",
+            "msr"      : "REQUIRE_MSR",
+            "pae"      : "REQUIRE_PAE",
+            "mce"      : "REQUIRE_MCE",
+            "cx8"      : "REQUIRE_CX8",
+            "apic"     : "REQUIRE_APIC",
+            "sep"      : "REQUIRE_SEP",
+            "mtrr"     : "REQUIRE_MTRR",
+            "pge"      : "REQUIRE_PGE",
+            "mca"      : "REQUIRE_MCA",
+            "cmov"     : "REQUIRE_CMOV",
+            "pat"      : "REQUIRE_PAT",
+            "pse36"    : "REQUIRE_PSE36",
+            "clflush"  : "REQUIRE_CLFLUSH",
+            "dts"      : "REQUIRE_DTS",
+            "acpi"     : "REQUIRE_ACPI",
+            "mmx"      : "REQUIRE_MMX",
+            "fxsr"     : "REQUIRE_FXSR",
+            "sse"      : "REQUIRE_SSE",
+            "sse2"     : "REQUIRE_SSE2",
+            "ss"       : "REQUIRE_SS",
+            "ht"       : "REQUIRE_HT",
+            "tm"       : "REQUIRE_TM",
+            "ia64"     : "REQUIRE_IA64",
+            "pbe"      : "REQUIRE_PBE",
+            "rdtscp"   : "REQUIRE_RDTSCP",
+            "pni"      : "REQUIRE_PNI",
+            "pclmulqdq": "REQUIRE_PCLMULQDQ",
+            "dtes64"   : "REQUIRE_DTES64",
+            "monitor"  : "REQUIRE_MONITOR",
+            "ds_cpl"   : "REQUIRE_DS_CPL",
+            "vmx"      : "REQUIRE_VMX",
+            "smx"      : "REQUIRE_SMX",
+            "est"      : "REQUIRE_EST",
+            "tm2"      : "REQUIRE_TM2",
+            "ssse3"    : "REQUIRE_SSSE3",
+            "cid"      : "REQUIRE_CID",
+            "fma"      : "REQUIRE_FMA",
+            "cx16"     : "REQUIRE_CX16",
+            "xtpr"     : "REQUIRE_XTPR",
+            "pdcm"     : "REQUIRE_PDCM",
+            "pcid"     : "REQUIRE_PCID",
+            "dca"      : "REQUIRE_DCA",
+            "sse4_1"   : "REQUIRE_SSE4_1",
+            "sse4_2"   : "REQUIRE_SSE4_2",
+            "x2apic"   : "REQUIRE_X2APIC",
+            "movbe"    : "REQUIRE_MOVBE",
+            "popcnt"   : "REQUIRE_POPCNT",
+            "tsc_deadline_timer"   : "REQUIRE_TSC_DEADLINE_TIMER",
+            "xsave"    : "REQUIRE_XSAVE",
+            "avx"      : "REQUIRE_AVX",
+            "f16c"     : "REQUIRE_F16C",
+            "rdrand"   : "REQUIRE_RDRAND",
+            "fsgsbase" : "REQUIRE_FSGSBASE",
+            "bmi1"     : "REQUIRE_BMI1",
+            "hle"      : "REQUIRE_HLE",
+            "avx2"     : "REQUIRE_AVX2",
+            "smep"     : "REQUIRE_SMEP",
+            "bmi2"     : "REQUIRE_BMI2",
+            "erms"     : "REQUIRE_ERMS",
+            "invpcid"  : "REQUIRE_INVPCID",
+            "rtm"      : "REQUIRE_RTM",
+            "mpx"      : "REQUIRE_MPX",
+            "rdseed"   : "REQUIRE_RDSEED",
+            "adx"      : "REQUIRE_ADX",
+            "smap"     : "REQUIRE_SMAP",
+        }
+
+    def mano_to_extra_spec_cpu_model(self, cpu_model):
+        """Map a MANO CPU-model enum to its cpu_info model string, or None."""
+        if cpu_model in self._mano_to_espec_cpumodel:
+            return self._mano_to_espec_cpumodel[cpu_model]
+        else:
+            return None
+
+    def extra_specs_to_mano_cpu_model(self, cpu_model):
+        """Map a cpu_info model string to its MANO REQUIRE_* enum, or None."""
+        if cpu_model in self._espec_to_mano_cpumodel:
+            return self._espec_to_mano_cpumodel[cpu_model]
+        else:
+            return None
+
+    def mano_to_extra_spec_cpu_arch(self, cpu_arch):
+        """Map a MANO CPU-arch enum to its cpu_info arch string, or None."""
+        if cpu_arch in self._mano_to_espec_cpuarch:
+            return self._mano_to_espec_cpuarch[cpu_arch]
+        else:
+            return None
+
+    def extra_specs_to_mano_cpu_arch(self, cpu_arch):
+        """Map a cpu_info arch string to its MANO REQUIRE_* enum, or None."""
+        if cpu_arch in self._espec_to_mano_cpuarch:
+            return self._espec_to_mano_cpuarch[cpu_arch]
+        else:
+            return None
+
+    def mano_to_extra_spec_cpu_vendor(self, cpu_vendor):
+        """Map a MANO CPU-vendor enum to its cpu_info vendor string, or None."""
+        if cpu_vendor in self._mano_to_espec_cpuvendor:
+            return self._mano_to_espec_cpuvendor[cpu_vendor]
+        else:
+            return None
+
+    def extra_spec_to_mano_cpu_vendor(self, cpu_vendor):
+        """Map a cpu_info vendor string to its MANO REQUIRE_* enum, or None."""
+        if cpu_vendor in self._espec_to_mano_cpuvendor:
+            return self._espec_to_mano_cpuvendor[cpu_vendor]
+        else:
+            return None
+
+    def mano_to_extra_spec_cpu_socket_count(self, cpu_sockets):
+        """Pass-through: socket count is used as-is in extra_specs."""
+        return cpu_sockets
+
+    def extra_spec_to_mano_cpu_socket_count(self, cpu_sockets):
+        """Convert the extra_spec socket count (string) to int."""
+        return int(cpu_sockets)
+
+    def mano_to_extra_spec_cpu_core_count(self, cpu_core_count):
+        """Pass-through: core count is used as-is in extra_specs."""
+        return cpu_core_count
+
+    def extra_spec_to_mano_cpu_core_count(self, cpu_core_count):
+        """Convert the extra_spec core count (string) to int."""
+        return int(cpu_core_count)
+
+    def mano_to_extra_spec_cpu_core_thread_count(self, core_thread_count):
+        """Pass-through: threads-per-core count is used as-is in extra_specs."""
+        return core_thread_count
+
+    def extra_spec_to_mano_cpu_core_thread_count(self, core_thread_count):
+        """Convert the extra_spec threads-per-core count (string) to int."""
+        return int(core_thread_count)
+
+    def mano_to_extra_spec_cpu_features(self, features):
+        """Build a cpu_info:features extra_spec string from MANO feature enums.
+
+        Unknown feature enums are silently dropped. Returns None when no
+        feature maps; a single feature is emitted bare; multiple features
+        are emitted as a space-separated list behind the '<all-in>' operator.
+        """
+        cpu_features = []
+        epa_feature_str = None
+        for f in features:
+            if f in self._mano_to_espec_cpufeatures:
+                cpu_features.append(self._mano_to_espec_cpufeatures[f])
+
+        if len(cpu_features) > 1:
+            epa_feature_str =  '<all-in> '+ " ".join(cpu_features)
+        elif len(cpu_features) == 1:
+            epa_feature_str = " ".join(cpu_features)
+
+        return epa_feature_str
+
+    def extra_spec_to_mano_cpu_features(self, features):
+        """Parse a cpu_info:features extra_spec string into MANO enums.
+
+        Strips an optional leading extra_spec operator token (e.g. '<all-in>',
+        '==', 's>='), then maps each space-separated feature flag through the
+        reverse table; unknown flags are silently dropped.
+        """
+        oper_symbols = ['=', '<in>', '<all-in>', '==', '!=', '>=', '<=', 's==', 's!=', 's<', 's<=', 's>', 's>=']
+        cpu_features = []
+        result = None
+        for oper in oper_symbols:
+            # The operator must be followed by a space, so '=' cannot
+            # accidentally match the front of '=='.
+            regex = '^'+oper+' (.*?)$'
+            result = re.search(regex, features)
+            if result is not None:
+                break
+
+        if result is not None:
+            feature_list = result.group(1)
+        else:
+            feature_list = features
+
+        for f in feature_list.split():
+            if f in self._espec_to_mano_cpufeatures:
+                cpu_features.append(self._espec_to_mano_cpufeatures[f])
+
+        return cpu_features
+    
+
+class OpenstackExtraSpecUtils(object):
+    """
+    General utility class for flavor Extra Specs processing
+    """
+    def __init__(self):
+        # Sub-helpers for host- and guest-EPA attribute conversion.
+        self.host = OpenstackHostEPAUtils()
+        self.guest = OpenstackGuestEPAUtils()
+        # Flavor extra_specs keys recognized as EPA-related.
+        self.extra_specs_keywords = [ 'hw:cpu_policy',
+                                      'hw:cpu_threads_policy',
+                                      'hw:mem_page_size',
+                                      'hw:numa_nodes',
+                                      'hw:numa_mempolicy',
+                                      'hw:numa_cpus',
+                                      'hw:numa_mem',
+                                      'trust:trusted_host',
+                                      'pci_passthrough:alias',
+                                      'capabilities:cpu_info:model',
+                                      'capabilities:cpu_info:arch',
+                                      'capabilities:cpu_info:vendor',
+                                      'capabilities:cpu_info:topology:sockets',
+                                      'capabilities:cpu_info:topology:cores',
+                                      'capabilities:cpu_info:topology:threads',
+                                      'capabilities:cpu_info:features',
+                                ]
+        # Regex matching any key that STARTS with one of the keywords,
+        # built as "^k1|^k2|...". The keywords contain no regex
+        # metacharacters, so no escaping is needed.
+        self.extra_specs_regex = re.compile("^"+"|^".join(self.extra_specs_keywords))
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py
new file mode 100644
index 0000000..7acf0fd
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py
@@ -0,0 +1,233 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import rift.rwcal.openstack as openstack_drv
+import logging
+import argparse
+import sys, os, time
+import rwlogger
+
# Root-logger setup for this stand-alone helper: emit everything at DEBUG
# and above, and also forward records to the RIFT.ware logging subsystem
# via the rwlogger handler attached below.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()

# Route this script's records into the "rw-cal-log"/"openstack" category.
rwlog_handler = rwlogger.RwLogger(category="rw-cal-log",
                                  subcategory="openstack",)
logger.addHandler(rwlog_handler)
#logger.setLevel(logging.DEBUG)
+
+
def assign_floating_ip_address(drv, argument):
    """Associate argument.floating_ip with the server's fixed management IP.

    Polls the server (up to ~120 seconds, one-second intervals) until an
    address of type 'fixed' appears on the management network, then performs
    the floating-IP association.  No-op when no floating IP was requested.

    Arguments:
        drv      - OpenstackDriver instance
        argument - parsed CLI namespace (floating_ip, server_id, mgmt_network)
    """
    if not argument.floating_ip:
        return

    server = drv.nova_server_get(argument.server_id)
    logger.info("Assigning the floating_ip: %s to VM: %s" %(argument.floating_ip, server['name']))

    for i in range(120):
        server = drv.nova_server_get(argument.server_id)
        for network_name, network_info in server['addresses'].items():
            if network_info and network_name == argument.mgmt_network:
                for n_info in network_info:
                    if 'OS-EXT-IPS:type' in n_info and n_info['OS-EXT-IPS:type'] == 'fixed':
                        management_ip = n_info['addr']
                        drv.nova_floating_ip_assign(argument.server_id,
                                                    argument.floating_ip,
                                                    management_ip)
                        logger.info("Assigned floating_ip: %s to management_ip: %s" %(argument.floating_ip, management_ip))
                        # Fixed: return only once the association succeeded.
                        # Previously an unconditional `return` ran at the end
                        # of the first n_info iteration, so the function gave
                        # up without assigning (and without retrying) whenever
                        # the first address entry was not of type 'fixed'.
                        return
        logger.info("Waiting for management_ip to be assigned to server: %s" %(server['name']))
        time.sleep(1)
    else:
        logger.info("No management_ip IP available to associate floating_ip for server: %s" %(server['name']))
    return
+
+
def create_port_metadata(drv, argument):
    """Publish Nova metadata describing the server's non-management ports.

    Writes one 'rift-meta-port-<n>' entry per data port (name, MAC address,
    network name and first fixed IP, when present) plus a 'rift-meta-ports'
    count.  Does nothing unless --port_metadata was given on the command line.

    Arguments:
        drv      - OpenstackDriver instance
        argument - parsed CLI namespace (port_metadata, mgmt_network, server_id)
    """
    if argument.port_metadata == False:
        return

    ### Resolve the management network's id so its ports can be excluded.
    network_list = drv.neutron_network_list()
    mgmt_network_id = [net['id'] for net in network_list if net['name'] == argument.mgmt_network][0]
    data_ports = [p for p in drv.neutron_port_list(**{'device_id': argument.server_id})
                  if p['network_id'] != mgmt_network_id]

    meta_data = {'rift-meta-ports': str(len(data_ports))}
    for idx, port in enumerate(data_ports):
        # Hand-built JSON-ish string; the exact format is what consumers of
        # this metadata expect, so it is reproduced verbatim.
        fields = ['"port_name":"' + port['name'] + '"']
        if 'mac_address' in port:
            fields.append('"hw_addr":"' + port['mac_address'] + '"')
        if 'network_id' in port:
            names = [net['name'] for net in network_list if net['id'] == port['network_id']]
            if names:
                fields.append('"network_name":"' + names[0] + '"')
        if 'fixed_ips' in port:
            fields.append('"ip":"' + port['fixed_ips'][0]['ip_address'] + '"')
        meta_data['rift-meta-port-' + str(idx)] = '{' + ','.join(fields) + '}'

    nvconn = drv.nova_drv._get_nova_connection()
    nvconn.servers.set_meta(argument.server_id, meta_data)
+    
+        
def prepare_vm_after_boot(drv,argument):
    """Wait for the server to become ACTIVE, then run post-boot preparation.

    Exits the process with status 3 if the server leaves BUILD for any state
    other than ACTIVE, and with status 4 if it is still building after the
    timeout.

    Arguments:
        drv      - OpenstackDriver instance
        argument - parsed CLI namespace (server_id, floating_ip, ...)
    """
    ### Important to call create_port_metadata before assign_floating_ip_address
    ### since assign_floating_ip_address can wait thus delaying port_metadata creation

    ### Wait for 2 minute for server to come up -- Needs fine tuning
    wait_time = 120 
    sleep_time = 1
    for i in range(int(wait_time/sleep_time)):
        server = drv.nova_server_get(argument.server_id)
        if server['status'] == 'ACTIVE':
            # Fixed log wording (was "Server %s to reached active state").
            logger.info("Server %s reached active state" %(server['name']))
            break
        elif server['status'] == 'BUILD':
            logger.info("Waiting for server: %s to build. Current state: %s" %(server['name'], server['status']))
            time.sleep(sleep_time)
        else:
            logger.info("Server %s reached state: %s" %(server['name'], server['status']))
            sys.exit(3)
    else:
        # for/else: the loop ran to completion without a break -> timed out.
        logger.error("Server %s did not reach active state in %d seconds. Current state: %s" %(server['name'], wait_time, server['status']))
        sys.exit(4)

    #create_port_metadata(drv, argument)
    assign_floating_ip_address(drv, argument)
+    
+
def main():
    """
    Main routine: parse CLI arguments, validate the required ones, daemonize,
    and run the post-boot preparation against the named server.

    Exit codes: 1 = missing required argument, 2 = fork failure,
    3/4 = propagated from prepare_vm_after_boot, 0 = success.
    """
    parser = argparse.ArgumentParser(description='Script to create openstack resources')
    parser.add_argument('--auth_url',
                        action = "store",
                        dest = "auth_url",
                        type = str,
                        help='Keystone Auth URL')

    parser.add_argument('--username',
                        action = "store",
                        dest = "username",
                        type = str,
                        help = "Username for openstack installation")

    parser.add_argument('--password',
                        action = "store",
                        dest = "password",
                        type = str,
                        help = "Password for openstack installation")

    parser.add_argument('--tenant_name',
                        action = "store",
                        dest = "tenant_name",
                        type = str,
                        help = "Tenant name openstack installation")

    parser.add_argument('--mgmt_network',
                        action = "store",
                        dest = "mgmt_network",
                        type = str,
                        help = "mgmt_network")

    parser.add_argument('--server_id',
                        action = "store",
                        dest = "server_id",
                        type = str,
                        help = "Server ID on which boot operations needs to be performed")

    parser.add_argument('--floating_ip',
                        action = "store",
                        dest = "floating_ip",
                        type = str,
                        help = "Floating IP to be assigned")

    parser.add_argument('--port_metadata',
                        action = "store_true",
                        dest = "port_metadata",
                        default = False,
                        help = "Create Port Metadata")

    argument = parser.parse_args()

    if not argument.auth_url:
        logger.error("ERROR: AuthURL is not configured")
        sys.exit(1)
    else:
        logger.info("Using AuthURL: %s" %(argument.auth_url))

    if not argument.username:
        logger.error("ERROR: Username is not configured")
        sys.exit(1)
    else:
        logger.info("Using Username: %s" %(argument.username))

    if not argument.password:
        logger.error("ERROR: Password is not configured")
        sys.exit(1)
    else:
        # Security fix: never write the credential value to the log
        # (previously the plaintext password was logged here).
        logger.info("Using Password: <hidden>")

    if not argument.tenant_name:
        logger.error("ERROR: Tenant Name is not configured")
        sys.exit(1)
    else:
        logger.info("Using Tenant Name: %s" %(argument.tenant_name))

    if not argument.mgmt_network:
        logger.error("ERROR: Management Network Name is not configured")
        sys.exit(1)
    else:
        logger.info("Using Management Network: %s" %(argument.mgmt_network))

    if not argument.server_id:
        logger.error("ERROR: Server ID is not configured")
        sys.exit(1)
    else:
        logger.info("Using Server ID : %s" %(argument.server_id))


    # Daemonize: the parent exits immediately so the caller is not blocked
    # while the child waits (possibly minutes) for the server to boot.
    try:
        pid = os.fork()
        if pid > 0:
            # exit for parent
            sys.exit(0)
    except OSError as e:
        logger.error("fork failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(2)

    drv = openstack_drv.OpenstackDriver(username = argument.username,
                                        password = argument.password,
                                        auth_url = argument.auth_url,
                                        tenant_name = argument.tenant_name,
                                        mgmt_network = argument.mgmt_network)
    prepare_vm_after_boot(drv, argument)
    sys.exit(0)

if __name__ == "__main__":
    main()
+        
+
diff --git a/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py b/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py
new file mode 100644
index 0000000..8e0c710
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py
@@ -0,0 +1,2122 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import contextlib
+import logging
+import os
+import subprocess
+import uuid
+
+import rift.rwcal.openstack as openstack_drv
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+import neutronclient.common.exceptions as NeutronException
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
# Command line used to spawn the stand-alone prepare_vm.py helper for
# post-boot VDU preparation (floating IP association / port metadata).
# NOTE(review): the password is interpolated into argv, so it is visible in
# the process list while the helper runs — consider passing credentials via
# the environment or a file instead.
PREPARE_VM_CMD = "prepare_vm.py --auth_url {auth_url} --username {username} --password {password} --tenant_name {tenant_name} --mgmt_network {mgmt_network} --server_id {server_id} --port_metadata"

# Exception -> RwStatus translation used by the @rwstatus / @rwcalstatus
# decorators that wrap every CAL method below.
rwstatus_exception_map = { IndexError: RwTypes.RwStatus.NOTFOUND,
                           KeyError: RwTypes.RwStatus.NOTFOUND,
                           NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}

rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)


# Shared helper for MANO <-> OpenStack flavor extra-spec conversions.
espec_utils = openstack_drv.OpenstackExtraSpecUtils()
+
class OpenstackCALOperationFailure(Exception):
    """Raised when a CAL operation against openstack fails."""


class UninitializedPluginError(Exception):
    """Raised when the plugin is used before do_init() was called."""


class OpenstackServerGroupError(Exception):
    """Raised for server-group (anti-affinity/affinity) related failures."""


class ImageUploadError(Exception):
    """Raised when an uploaded image fails verification."""
+
+
+class RwcalOpenstackPlugin(GObject.Object, RwCal.Cloud):
+    """This class implements the CAL VALA methods for openstack."""
+
+    instance_num = 1
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._driver_class = openstack_drv.OpenstackDriver
+        self.log = logging.getLogger('rwcal.openstack.%s' % RwcalOpenstackPlugin.instance_num)
+        self.log.setLevel(logging.DEBUG)
+
+        self._rwlog_handler = None
+        RwcalOpenstackPlugin.instance_num += 1
+
+
    @contextlib.contextmanager
    def _use_driver(self, account):
        """Yield an OpenstackDriver built from `account`'s credentials.

        Raises:
            UninitializedPluginError - if do_init() has not yet installed the
                rwlog handler (plugin used before init()).
            Exception - whatever the driver constructor raises; logged here
                and re-raised unchanged.
        """
        if self._rwlog_handler is None:
            raise UninitializedPluginError("Must call init() in CAL plugin before use.")

        # Run all driver activity under the plugin's rwlog root handler so
        # driver-side log records land in the CAL log category.
        with rwlogger.rwlog_root_handler(self._rwlog_handler):
            try:
                drv = self._driver_class(username      = account.openstack.key,
                                         password      = account.openstack.secret,
                                         auth_url      = account.openstack.auth_url,
                                         tenant_name   = account.openstack.tenant,
                                         mgmt_network  = account.openstack.mgmt_network,
                                         cert_validate = account.openstack.cert_validate )
            except Exception as e:
                self.log.error("RwcalOpenstackPlugin: OpenstackDriver init failed. Exception: %s" %(str(e)))
                raise

            yield drv
+
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        self._rwlog_handler = rwlogger.RwLogger(
+                category="rw-cal-log",
+                subcategory="openstack",
+                log_hdl=rwlog_ctx,
+                )
+        self.log.addHandler(self._rwlog_handler)
+        self.log.propagate = False
+
    @rwstatus(ret_on_failure=[None])
    def do_validate_cloud_creds(self, account):
        """
        Validates the cloud account credentials for the specified account.
        Performs an access to the resources using Keystone API. If creds
        are not valid, returns an error code & reason string
        Arguments:
            account - a cloud account to validate

        Returns:
            Validation Code and Details String
        """
        status = RwcalYang.CloudConnectionStatus()

        try:
            with self._use_driver(account) as drv:
                drv.validate_account_creds()

        # Credential-specific failure must be caught before the generic one
        # so the "Invalid Credentials" detail is reported for it.
        except openstack_drv.ValidationError as e:
            self.log.error("RwcalOpenstackPlugin: OpenstackDriver credential validation failed. Exception: %s", str(e))
            status.status = "failure"
            status.details = "Invalid Credentials: %s" % str(e)

        except Exception as e:
            msg = "RwcalOpenstackPlugin: OpenstackDriver connection failed. Exception: %s" %(str(e))
            self.log.error(msg)
            status.status = "failure"
            status.details = msg

        else:
            # No exception at all: the account is reachable and valid.
            status.status = "success"
            status.details = "Connection was successful"

        return status
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_management_network(self, account):
+        """
+        Returns the management network associated with the specified account.
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            The management network
+        """
+        return account.openstack.mgmt_network
+
    @rwstatus(ret_on_failure=[""])
    def do_create_tenant(self, account, name):
        """Create a new tenant. Not implemented for openstack; the @rwstatus
        decorator maps NotImplementedError to NOT_IMPLEMENTED.

        Arguments:
            account - a cloud account
            name - name of the tenant

        Returns:
            The tenant id
        """
        raise NotImplementedError

    @rwstatus
    def do_delete_tenant(self, account, tenant_id):
        """Delete a tenant. Not implemented for openstack.

        Arguments:
            account - a cloud account
            tenant_id - id of the tenant
        """
        raise NotImplementedError

    @rwstatus(ret_on_failure=[[]])
    def do_get_tenant_list(self, account):
        """List tenants. Not implemented for openstack.

        Arguments:
            account - a cloud account

        Returns:
            List of tenants
        """
        raise NotImplementedError

    @rwstatus(ret_on_failure=[""])
    def do_create_role(self, account, name):
        """Create a new role. Not implemented for openstack.
        (Docstring fixed: previously described a "user".)

        Arguments:
            account - a cloud account
            name - name of the role

        Returns:
            The role id
        """
        raise NotImplementedError

    @rwstatus
    def do_delete_role(self, account, role_id):
        """Delete a role. Not implemented for openstack.
        (Docstring fixed: previously described a "user".)

        Arguments:
            account - a cloud account
            role_id - id of the role
        """
        raise NotImplementedError

    @rwstatus(ret_on_failure=[[]])
    def do_get_role_list(self, account):
        """List roles. Not implemented for openstack.

        Arguments:
            account - a cloud account

        Returns:
            List of roles
        """
        raise NotImplementedError
+
    @rwstatus(ret_on_failure=[""])
    def do_create_image(self, account, image):
        """Create an image

        Uploads an image to glance either from an inherited file descriptor
        (image.fileno) or from a path on disk (image.location), and verifies
        the stored checksum when image.checksum is provided.

        Arguments:
            account - a cloud account
            image - a description of the image to create

        Returns:
            The image id

        Raises:
            ImageUploadError - when the uploaded image's checksum does not
                match the expected one (the bad image is deleted first).
        """

        try:
            # If the use passed in a file descriptor, use that to
            # upload the image.
            if image.has_field("fileno"):
                # Duplicate the fd so closing our handle below does not
                # close the caller's descriptor.
                new_fileno = os.dup(image.fileno)
                hdl = os.fdopen(new_fileno, 'rb')
            else:
                hdl = open(image.location, "rb")
        except Exception as e:
            self.log.error("Could not open file for upload. Exception received: %s", str(e))
            raise

        # 'with' guarantees the handle is closed even if the upload fails.
        with hdl as fd:
            kwargs = {}
            kwargs['name'] = image.name

            if image.disk_format:
                kwargs['disk_format'] = image.disk_format
            if image.container_format:
                kwargs['container_format'] = image.container_format

            with self._use_driver(account) as drv:
                # Create Image
                image_id = drv.glance_image_create(**kwargs)
                # Upload the Image
                drv.glance_image_upload(image_id, fd)

                # Verify integrity; delete the corrupt upload on mismatch so
                # it cannot be booted from later.
                if image.checksum:
                    stored_image = drv.glance_image_get(image_id)
                    if stored_image.checksum != image.checksum:
                        drv.glance_image_delete(image_id=image_id)
                        raise ImageUploadError(
                                "image checksum did not match (actual: %s, expected: %s). Deleting." %
                                (stored_image.checksum, image.checksum)
                                )

        return image_id
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """Delete a vm image.
+
+        Arguments:
+            account - a cloud account
+            image_id - id of the image to delete
+        """
+        with self._use_driver(account) as drv:
+            drv.glance_image_delete(image_id=image_id)
+
+
+    @staticmethod
+    def _fill_image_info(img_info):
+        """Create a GI object from image info dictionary
+
+        Converts image information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            account - a cloud account
+            img_info - image information dictionary object from openstack
+
+        Returns:
+            The ImageInfoItem
+        """
+        img = RwcalYang.ImageInfoItem()
+        img.name = img_info['name']
+        img.id = img_info['id']
+        img.checksum = img_info['checksum']
+        img.disk_format = img_info['disk_format']
+        img.container_format = img_info['container_format']
+        if img_info['status'] == 'active':
+            img.state = 'active'
+        else:
+            img.state = 'inactive'
+        return img
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """Return a list of the names of all available images.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            The the list of images in VimResources object
+        """
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            images = drv.glance_image_list()
+        for img in images:
+            response.imageinfo_list.append(RwcalOpenstackPlugin._fill_image_info(img))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        """Return a image information.
+
+        Arguments:
+            account - a cloud account
+            image_id - an id of the image
+
+        Returns:
+            ImageInfoItem object containing image information.
+        """
+        with self._use_driver(account) as drv:
+            image = drv.glance_image_get(image_id)
+        return RwcalOpenstackPlugin._fill_image_info(image)
+
    @rwstatus(ret_on_failure=[""])
    def do_create_vm(self, account, vminfo):
        """Create a new virtual machine.

        Builds the nova boot kwargs from `vminfo` (flavor, image, userdata,
        ports/networks, metadata from user_tags, availability zone, server
        group), optionally pre-allocates a floating IP, boots the server and
        kicks off post-boot preparation when a floating IP was allocated.

        Arguments:
            account - a cloud account
            vminfo - information that defines the type of VM to create

        Returns:
            The image id
        """
        kwargs = {}
        kwargs['name']      = vminfo.vm_name
        kwargs['flavor_id'] = vminfo.flavor_id
        kwargs['image_id']  = vminfo.image_id

        with self._use_driver(account) as drv:
            ### If floating_ip is required and we don't have one, better fail before any further allocation
            if vminfo.has_field('allocate_public_address') and vminfo.allocate_public_address:
                if account.openstack.has_field('floating_ip_pool'):
                    pool_name = account.openstack.floating_ip_pool
                else:
                    pool_name = None
                floating_ip = self._allocate_floating_ip(drv, pool_name)
            else:
                floating_ip = None
        # NOTE(review): if nova_server_create below raises, the floating IP
        # allocated above is not released here — confirm cleanup elsewhere.

        if vminfo.has_field('cloud_init') and vminfo.cloud_init.has_field('userdata'):
            kwargs['userdata']  = vminfo.cloud_init.userdata
        else:
            kwargs['userdata'] = ''

        if account.openstack.security_groups:
            kwargs['security_groups'] = account.openstack.security_groups

        port_list = []
        for port in vminfo.port_list:
            port_list.append(port.port_id)

        if port_list:
            kwargs['port_list'] = port_list

        network_list = []
        for network in vminfo.network_list:
            network_list.append(network.network_id)

        if network_list:
            kwargs['network_list'] = network_list

        # Copy every set user_tags field into the nova server metadata.
        metadata = {}
        for field in vminfo.user_tags.fields:
            if vminfo.user_tags.has_field(field):
                metadata[field] = getattr(vminfo.user_tags, field)
        kwargs['metadata']  = metadata

        if vminfo.has_field('availability_zone'):
            kwargs['availability_zone']  = vminfo.availability_zone
        else:
            kwargs['availability_zone'] = None

        if vminfo.has_field('server_group'):
            kwargs['scheduler_hints'] = {'group': vminfo.server_group }
        else:
            kwargs['scheduler_hints'] = None

        # Second driver session: boot the server, then hand off floating-IP
        # association to the asynchronous post-boot helper.
        with self._use_driver(account) as drv:
            vm_id = drv.nova_server_create(**kwargs)
            if floating_ip:
                self.prepare_vdu_on_boot(account, vm_id, floating_ip)

        return vm_id
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """Start an existing virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        with self._use_driver(account) as drv:
+            drv.nova_server_start(vm_id)
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """Stop a running virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        with self._use_driver(account) as drv:
+            drv.nova_server_stop(vm_id)
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """Delete a virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        with self._use_driver(account) as drv:
+            drv.nova_server_delete(vm_id)
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """Reboot a virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        with self._use_driver(account) as drv:
+            drv.nova_server_reboot(vm_id)
+
    @staticmethod
    def _fill_vm_info(vm_info, mgmt_network):
        """Create a GI object from vm info dictionary

        Converts VM information dictionary object returned by openstack
        driver into Protobuf Gi Object

        Arguments:
            vm_info - VM information from openstack
            mgmt_network - Management network

        Returns:
            Protobuf Gi object for VM
        """
        vm = RwcalYang.VMInfoItem()
        vm.vm_id     = vm_info['id']
        vm.vm_name   = vm_info['name']
        vm.image_id  = vm_info['image']['id']
        vm.flavor_id = vm_info['flavor']['id']
        vm.state     = vm_info['status']
        # Management network: public_ip prefers a 'floating' address, falling
        # back to the first address; management_ip is always the first
        # address.  Other networks: every address goes to private_ip_list.
        for network_name, network_info in vm_info['addresses'].items():
            if network_info:
                if network_name == mgmt_network:
                    vm.public_ip = next((item['addr']
                                            for item in network_info
                                            if item['OS-EXT-IPS:type'] == 'floating'),
                                        network_info[0]['addr'])
                    vm.management_ip = network_info[0]['addr']
                else:
                    for interface in network_info:
                        addr = vm.private_ip_list.add()
                        addr.ip_address = interface['addr']

        # NOTE(review): the loop above already sets public_ip whenever the
        # mgmt network has any address, so this fallback only fires if that
        # value is falsy — confirm whether it is reachable before removing.
        for network_name, network_info in vm_info['addresses'].items():
            if network_info and network_name == mgmt_network and not vm.public_ip:
                for interface in network_info:
                    if 'OS-EXT-IPS:type' in interface and interface['OS-EXT-IPS:type'] == 'floating':
                        vm.public_ip = interface['addr']

        # Look for any metadata
        for key, value in vm_info['metadata'].items():
            if key in vm.user_tags.fields:
                setattr(vm.user_tags, key, value)
        if 'OS-EXT-SRV-ATTR:host' in vm_info:
            if vm_info['OS-EXT-SRV-ATTR:host'] != None:
                vm.host_name = vm_info['OS-EXT-SRV-ATTR:host']
        if 'OS-EXT-AZ:availability_zone' in vm_info:
            if vm_info['OS-EXT-AZ:availability_zone'] != None:
                vm.availability_zone = vm_info['OS-EXT-AZ:availability_zone']
        return vm
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        """Return a list of the VMs as vala boxed objects
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List containing VM information
+        """
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            vms = drv.nova_server_list()
+        for vm in vms:
+            response.vminfo_list.append(RwcalOpenstackPlugin._fill_vm_info(vm, account.openstack.mgmt_network))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vm(self, account, id):
+        """Return vm information.
+
+        Arguments:
+            account - a cloud account
+            id - an id for the VM
+
+        Returns:
+            VM information
+        """
+        with self._use_driver(account) as drv:
+            vm = drv.nova_server_get(id)
+        return RwcalOpenstackPlugin._fill_vm_info(vm, account.openstack.mgmt_network)
+
+    @staticmethod
+    def _get_guest_epa_specs(guest_epa):
+        """
+        Returns EPA Specs dictionary for guest_epa attributes
+        """
+        epa_specs = {}
+        if guest_epa.has_field('mempage_size'):
+            mempage_size = espec_utils.guest.mano_to_extra_spec_mempage_size(guest_epa.mempage_size)
+            if mempage_size is not None:
+                epa_specs['hw:mem_page_size'] = mempage_size
+
+        if guest_epa.has_field('cpu_pinning_policy'):
+            cpu_pinning_policy = espec_utils.guest.mano_to_extra_spec_cpu_pinning_policy(guest_epa.cpu_pinning_policy)
+            if cpu_pinning_policy is not None:
+                epa_specs['hw:cpu_policy'] = cpu_pinning_policy
+
+        if guest_epa.has_field('cpu_thread_pinning_policy'):
+            cpu_thread_pinning_policy = espec_utils.guest.mano_to_extra_spec_cpu_thread_pinning_policy(guest_epa.cpu_thread_pinning_policy)
+            if cpu_thread_pinning_policy is None:
+                epa_specs['hw:cpu_threads_policy'] = cpu_thread_pinning_policy
+
+        if guest_epa.has_field('trusted_execution'):
+            trusted_execution = espec_utils.guest.mano_to_extra_spec_trusted_execution(guest_epa.trusted_execution)
+            if trusted_execution is not None:
+                epa_specs['trust:trusted_host'] = trusted_execution
+
+        if guest_epa.has_field('numa_node_policy'):
+            if guest_epa.numa_node_policy.has_field('node_cnt'):
+                numa_node_count = espec_utils.guest.mano_to_extra_spec_numa_node_count(guest_epa.numa_node_policy.node_cnt)
+                if numa_node_count is not None:
+                    epa_specs['hw:numa_nodes'] = numa_node_count
+
+            if guest_epa.numa_node_policy.has_field('mem_policy'):
+                numa_memory_policy = espec_utils.guest.mano_to_extra_spec_numa_memory_policy(guest_epa.numa_node_policy.mem_policy)
+                if numa_memory_policy is not None:
+                    epa_specs['hw:numa_mempolicy'] = numa_memory_policy
+
+            if guest_epa.numa_node_policy.has_field('node'):
+                for node in guest_epa.numa_node_policy.node:
+                    if node.has_field('vcpu') and node.vcpu:
+                        epa_specs['hw:numa_cpus.'+str(node.id)] = ','.join([str(j) for j in node.vcpu])
+                    if node.memory_mb:
+                        epa_specs['hw:numa_mem.'+str(node.id)] = str(node.memory_mb)
+
+        if guest_epa.has_field('pcie_device'):
+            pci_devices = []
+            for device in guest_epa.pcie_device:
+                pci_devices.append(device.device_id +':'+str(device.count))
+            epa_specs['pci_passthrough:alias'] = ','.join(pci_devices)
+
+        return epa_specs
+
+    @staticmethod
+    def _get_host_epa_specs(host_epa):
+        """
+        Returns EPA Specs dictionary for host_epa attributes
+        """
+
+        epa_specs = {}
+
+        if host_epa.has_field('cpu_model'):
+            cpu_model = espec_utils.host.mano_to_extra_spec_cpu_model(host_epa.cpu_model)
+            if cpu_model is not None:
+                epa_specs['capabilities:cpu_info:model'] = cpu_model
+
+        if host_epa.has_field('cpu_arch'):
+            cpu_arch = espec_utils.host.mano_to_extra_spec_cpu_arch(host_epa.cpu_arch)
+            if cpu_arch is not None:
+                epa_specs['capabilities:cpu_info:arch'] = cpu_arch
+
+        if host_epa.has_field('cpu_vendor'):
+            cpu_vendor = espec_utils.host.mano_to_extra_spec_cpu_vendor(host_epa.cpu_vendor)
+            if cpu_vendor is not None:
+                epa_specs['capabilities:cpu_info:vendor'] = cpu_vendor
+
+        if host_epa.has_field('cpu_socket_count'):
+            cpu_socket_count = espec_utils.host.mano_to_extra_spec_cpu_socket_count(host_epa.cpu_socket_count)
+            if cpu_socket_count is not None:
+                epa_specs['capabilities:cpu_info:topology:sockets'] = cpu_socket_count
+
+        if host_epa.has_field('cpu_core_count'):
+            cpu_core_count = espec_utils.host.mano_to_extra_spec_cpu_core_count(host_epa.cpu_core_count)
+            if cpu_core_count is not None:
+                epa_specs['capabilities:cpu_info:topology:cores'] = cpu_core_count
+
+        if host_epa.has_field('cpu_core_thread_count'):
+            cpu_core_thread_count = espec_utils.host.mano_to_extra_spec_cpu_core_thread_count(host_epa.cpu_core_thread_count)
+            if cpu_core_thread_count is not None:
+                epa_specs['capabilities:cpu_info:topology:threads'] = cpu_core_thread_count
+
+        if host_epa.has_field('cpu_feature'):
+            cpu_features = []
+            espec_cpu_features = []
+            for feature in host_epa.cpu_feature:
+                cpu_features.append(feature)
+            espec_cpu_features = espec_utils.host.mano_to_extra_spec_cpu_features(cpu_features)
+            if espec_cpu_features is not None:
+                epa_specs['capabilities:cpu_info:features'] = espec_cpu_features
+        return epa_specs
+
+    @staticmethod
+    def _get_hypervisor_epa_specs(guest_epa):
+        """
+        Returns EPA Specs dictionary for hypervisor_epa attributes
+        """
+        hypervisor_epa = {}
+        return hypervisor_epa
+
+    @staticmethod
+    def _get_vswitch_epa_specs(guest_epa):
+        """
+        Returns EPA Specs dictionary for vswitch_epa attributes
+        """
+        vswitch_epa = {}
+        return vswitch_epa
+
+    @staticmethod
+    def _get_host_aggregate_epa_specs(host_aggregate):
+        """
+        Returns EPA Specs dictionary for host aggregates
+        """
+        epa_specs = {}
+        for aggregate in host_aggregate:
+            epa_specs['aggregate_instance_extra_specs:'+aggregate.metadata_key] = aggregate.metadata_value
+
+        return epa_specs
+
+    @staticmethod
+    def _get_epa_specs(flavor):
+        """
+        Returns epa_specs dictionary based on flavor information
+        """
+        epa_specs = {}
+        if flavor.has_field('guest_epa'):
+            guest_epa = RwcalOpenstackPlugin._get_guest_epa_specs(flavor.guest_epa)
+            epa_specs.update(guest_epa)
+        if flavor.has_field('host_epa'):
+            host_epa = RwcalOpenstackPlugin._get_host_epa_specs(flavor.host_epa)
+            epa_specs.update(host_epa)
+        if flavor.has_field('hypervisor_epa'):
+            hypervisor_epa = RwcalOpenstackPlugin._get_hypervisor_epa_specs(flavor.hypervisor_epa)
+            epa_specs.update(hypervisor_epa)
+        if flavor.has_field('vswitch_epa'):
+            vswitch_epa = RwcalOpenstackPlugin._get_vswitch_epa_specs(flavor.vswitch_epa)
+            epa_specs.update(vswitch_epa)
+        if flavor.has_field('host_aggregate'):
+            host_aggregate = RwcalOpenstackPlugin._get_host_aggregate_epa_specs(flavor.host_aggregate)
+            epa_specs.update(host_aggregate)
+        return epa_specs
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_flavor(self, account, flavor):
+        """Create new flavor.
+
+        Arguments:
+            account - a cloud account
+            flavor - flavor of the VM
+
+        Returns:
+            flavor id
+        """
+        epa_specs = RwcalOpenstackPlugin._get_epa_specs(flavor)
+        with self._use_driver(account) as drv:
+            return drv.nova_flavor_create(name      = flavor.name,
+                                          ram       = flavor.vm_flavor.memory_mb,
+                                          vcpus     = flavor.vm_flavor.vcpu_count,
+                                          disk      = flavor.vm_flavor.storage_gb,
+                                          epa_specs = epa_specs)
+
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """Delete flavor.
+
+        Arguments:
+            account - a cloud account
+            flavor_id - id flavor of the VM
+        """
+        with self._use_driver(account) as drv:
+            drv.nova_flavor_delete(flavor_id)
+
+    @staticmethod
+    def _fill_epa_attributes(flavor, flavor_info):
+        """Helper function to populate the EPA attributes
+
+        Arguments:
+              flavor     : Object with EPA attributes
+              flavor_info: A dictionary of flavor_info received from openstack
+        Returns:
+              None
+        """
+        getattr(flavor, 'vm_flavor').vcpu_count  = flavor_info['vcpus']
+        getattr(flavor, 'vm_flavor').memory_mb   = flavor_info['ram']
+        getattr(flavor, 'vm_flavor').storage_gb  = flavor_info['disk']
+
+        ### If extra_specs in flavor_info
+        if not 'extra_specs' in flavor_info:
+            return
+
+        for attr in flavor_info['extra_specs']:
+            if attr == 'hw:cpu_policy':
+                cpu_pinning_policy = espec_utils.guest.extra_spec_to_mano_cpu_pinning_policy(flavor_info['extra_specs']['hw:cpu_policy'])
+                if cpu_pinning_policy is not None:
+                    getattr(flavor, 'guest_epa').cpu_pinning_policy = cpu_pinning_policy
+
+            elif attr == 'hw:cpu_threads_policy':
+                cpu_thread_pinning_policy = espec_utils.guest.extra_spec_to_mano_cpu_thread_pinning_policy(flavor_info['extra_specs']['hw:cpu_threads_policy'])
+                if cpu_thread_pinning_policy is not None:
+                    getattr(flavor, 'guest_epa').cpu_thread_pinning_policy = cpu_thread_pinning_policy
+
+            elif attr == 'hw:mem_page_size':
+                mempage_size = espec_utils.guest.extra_spec_to_mano_mempage_size(flavor_info['extra_specs']['hw:mem_page_size'])
+                if mempage_size is not None:
+                    getattr(flavor, 'guest_epa').mempage_size = mempage_size
+
+
+            elif attr == 'hw:numa_nodes':
+                numa_node_count = espec_utils.guest.extra_specs_to_mano_numa_node_count(flavor_info['extra_specs']['hw:numa_nodes'])
+                if numa_node_count is not None:
+                    getattr(flavor,'guest_epa').numa_node_policy.node_cnt = numa_node_count
+
+            elif attr.startswith('hw:numa_cpus.'):
+                node_id = attr.split('.')[1]
+                nodes = [ n for n in flavor.guest_epa.numa_node_policy.node if n.id == int(node_id) ]
+                if nodes:
+                    numa_node = nodes[0]
+                else:
+                    numa_node = getattr(flavor,'guest_epa').numa_node_policy.node.add()
+                    numa_node.id = int(node_id)
+
+                numa_node.vcpu = [ int(x) for x in flavor_info['extra_specs'][attr].split(',') ]
+
+            elif attr.startswith('hw:numa_mem.'):
+                node_id = attr.split('.')[1]
+                nodes = [ n for n in flavor.guest_epa.numa_node_policy.node if n.id == int(node_id) ]
+                if nodes:
+                    numa_node = nodes[0]
+                else:
+                    numa_node = getattr(flavor,'guest_epa').numa_node_policy.node.add()
+                    numa_node.id = int(node_id)
+
+                numa_node.memory_mb =  int(flavor_info['extra_specs'][attr])
+
+            elif attr == 'hw:numa_mempolicy':
+                numa_memory_policy = espec_utils.guest.extra_to_mano_spec_numa_memory_policy(flavor_info['extra_specs']['hw:numa_mempolicy'])
+                if numa_memory_policy is not None:
+                    getattr(flavor,'guest_epa').numa_node_policy.mem_policy = numa_memory_policy
+
+            elif attr == 'trust:trusted_host':
+                trusted_execution = espec_utils.guest.extra_spec_to_mano_trusted_execution(flavor_info['extra_specs']['trust:trusted_host'])
+                if trusted_execution is not None:
+                    getattr(flavor,'guest_epa').trusted_execution = trusted_execution
+
+            elif attr == 'pci_passthrough:alias':
+                device_types = flavor_info['extra_specs']['pci_passthrough:alias']
+                for device in device_types.split(','):
+                    dev = getattr(flavor,'guest_epa').pcie_device.add()
+                    dev.device_id = device.split(':')[0]
+                    dev.count = int(device.split(':')[1])
+
+            elif attr == 'capabilities:cpu_info:model':
+                cpu_model = espec_utils.host.extra_specs_to_mano_cpu_model(flavor_info['extra_specs']['capabilities:cpu_info:model'])
+                if cpu_model is not None:
+                    getattr(flavor, 'host_epa').cpu_model = cpu_model
+
+            elif attr == 'capabilities:cpu_info:arch':
+                cpu_arch = espec_utils.host.extra_specs_to_mano_cpu_arch(flavor_info['extra_specs']['capabilities:cpu_info:arch'])
+                if cpu_arch is not None:
+                    getattr(flavor, 'host_epa').cpu_arch = cpu_arch
+
+            elif attr == 'capabilities:cpu_info:vendor':
+                cpu_vendor = espec_utils.host.extra_spec_to_mano_cpu_vendor(flavor_info['extra_specs']['capabilities:cpu_info:vendor'])
+                if cpu_vendor is not None:
+                    getattr(flavor, 'host_epa').cpu_vendor = cpu_vendor
+
+            elif attr == 'capabilities:cpu_info:topology:sockets':
+                cpu_sockets = espec_utils.host.extra_spec_to_mano_cpu_socket_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:sockets'])
+                if cpu_sockets is not None:
+                    getattr(flavor, 'host_epa').cpu_socket_count = cpu_sockets
+
+            elif attr == 'capabilities:cpu_info:topology:cores':
+                cpu_cores = espec_utils.host.extra_spec_to_mano_cpu_core_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:cores'])
+                if cpu_cores is not None:
+                    getattr(flavor, 'host_epa').cpu_core_count = cpu_cores
+
+            elif attr == 'capabilities:cpu_info:topology:threads':
+                cpu_threads = espec_utils.host.extra_spec_to_mano_cpu_core_thread_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:threads'])
+                if cpu_threads is not None:
+                    getattr(flavor, 'host_epa').cpu_core_thread_count = cpu_threads
+
+            elif attr == 'capabilities:cpu_info:features':
+                cpu_features = espec_utils.host.extra_spec_to_mano_cpu_features(flavor_info['extra_specs']['capabilities:cpu_info:features'])
+                if cpu_features is not None:
+                    for feature in cpu_features:
+                        getattr(flavor, 'host_epa').cpu_feature.append(feature)
+            elif attr.startswith('aggregate_instance_extra_specs:'):
+                    aggregate = getattr(flavor, 'host_aggregate').add()
+                    aggregate.metadata_key = ":".join(attr.split(':')[1::])
+                    aggregate.metadata_value = flavor_info['extra_specs'][attr]
+
+    @staticmethod
+    def _fill_flavor_info(flavor_info):
+        """Create a GI object from flavor info dictionary
+
+        Converts Flavor information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            flavor_info: Flavor information from openstack
+
+        Returns:
+             Object of class FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name                       = flavor_info['name']
+        flavor.id                         = flavor_info['id']
+        RwcalOpenstackPlugin._fill_epa_attributes(flavor, flavor_info)
+        return flavor
+
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """Return flavor information.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of flavors
+        """
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            flavors = drv.nova_flavor_list()
+        for flv in flavors:
+            response.flavorinfo_list.append(RwcalOpenstackPlugin._fill_flavor_info(flv))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, id):
+        """Return flavor information.
+
+        Arguments:
+            account - a cloud account
+            id - an id for the flavor
+
+        Returns:
+            Flavor info item
+        """
+        with self._use_driver(account) as drv:
+            flavor = drv.nova_flavor_get(id)
+        return RwcalOpenstackPlugin._fill_flavor_info(flavor)
+
+
+    def _fill_network_info(self, network_info, account):
+        """Create a GI object from network info dictionary
+
+        Converts Network information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            network_info - Network information from openstack
+            account - a cloud account
+
+        Returns:
+            Network info item
+        """
+        network                  = RwcalYang.NetworkInfoItem()
+        network.network_name     = network_info['name']
+        network.network_id       = network_info['id']
+        if ('provider:network_type' in network_info) and (network_info['provider:network_type'] != None):
+            network.provider_network.overlay_type = network_info['provider:network_type'].upper()
+        if ('provider:segmentation_id' in network_info) and (network_info['provider:segmentation_id']):
+            network.provider_network.segmentation_id = network_info['provider:segmentation_id']
+        if ('provider:physical_network' in network_info) and (network_info['provider:physical_network']):
+            network.provider_network.physical_network = network_info['provider:physical_network'].upper()
+
+        if 'subnets' in network_info and network_info['subnets']:
+            subnet_id = network_info['subnets'][0]
+            with self._use_driver(account) as drv:
+                subnet = drv.neutron_subnet_get(subnet_id)
+            network.subnet = subnet['cidr']
+        return network
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        """Return a list of networks
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of networks
+        """
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            networks = drv.neutron_network_list()
+        for network in networks:
+            response.networkinfo_list.append(self._fill_network_info(network, account))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, id):
+        """Return a network
+
+        Arguments:
+            account - a cloud account
+            id - an id for the network
+
+        Returns:
+            Network info item
+        """
+        with self._use_driver(account) as drv:
+            network = drv.neutron_network_get(id)
+        return self._fill_network_info(network, account)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_network(self, account, network):
+        """Create a new network
+
+        Arguments:
+            account - a cloud account
+            network - Network object
+
+        Returns:
+            Network id
+        """
+        kwargs = {}
+        kwargs['name']            = network.network_name
+        kwargs['admin_state_up']  = True
+        kwargs['external_router'] = False
+        kwargs['shared']          = False
+
+        if network.has_field('provider_network'):
+            if network.provider_network.has_field('physical_network'):
+                kwargs['physical_network'] = network.provider_network.physical_network
+            if network.provider_network.has_field('overlay_type'):
+                kwargs['network_type'] = network.provider_network.overlay_type.lower()
+            if network.provider_network.has_field('segmentation_id'):
+                kwargs['segmentation_id'] = network.provider_network.segmentation_id
+
+        with self._use_driver(account) as drv:
+            network_id = drv.neutron_network_create(**kwargs)
+            drv.neutron_subnet_create(network_id = network_id,
+                                      cidr = network.subnet)
+        return network_id
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        """Delete a network
+
+        Arguments:
+            account - a cloud account
+            network_id - an id for the network
+        """
+        with self._use_driver(account) as drv:
+            drv.neutron_network_delete(network_id)
+
+    @staticmethod
+    def _fill_port_info(port_info):
+        """Create a GI object from port info dictionary
+
+        Converts Port information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            port_info - Port information from openstack
+
+        Returns:
+            Port info item
+        """
+        port = RwcalYang.PortInfoItem()
+
+        port.port_name  = port_info['name']
+        port.port_id    = port_info['id']
+        port.network_id = port_info['network_id']
+        port.port_state = port_info['status']
+        if 'device_id' in port_info:
+            port.vm_id = port_info['device_id']
+        if 'fixed_ips' in port_info:
+            port.ip_address = port_info['fixed_ips'][0]['ip_address']
+        return port
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        """Return a port
+
+        Arguments:
+            account - a cloud account
+            port_id - an id for the port
+
+        Returns:
+            Port info item
+        """
+        with self._use_driver(account) as drv:
+            port = drv.neutron_port_get(port_id)
+
+        return RwcalOpenstackPlugin._fill_port_info(port)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        """Return a list of ports
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            Port info list
+        """
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            ports = drv.neutron_port_list(*{})
+        for port in ports:
+            response.portinfo_list.append(RwcalOpenstackPlugin._fill_port_info(port))
+        return response
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_port(self, account, port):
+        """Create a new port
+
+        Arguments:
+            account - a cloud account
+            port - port object
+
+        Returns:
+            Port id
+        """
+        kwargs = {}
+        kwargs['name'] = port.port_name
+        kwargs['network_id'] = port.network_id
+        kwargs['admin_state_up'] = True
+        if port.has_field('vm_id'):
+            kwargs['vm_id'] = port.vm_id
+        if port.has_field('port_type'):
+            kwargs['port_type'] = port.port_type
+        else:
+            kwargs['port_type'] = "normal"
+
+        with self._use_driver(account) as drv:
+            return drv.neutron_port_create(**kwargs)
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        """Delete a port
+
+        Arguments:
+            account - a cloud account
+            port_id - an id for port
+        """
+        with self._use_driver(account) as drv:
+            drv.neutron_port_delete(port_id)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_add_host(self, account, host):
+        """Add a new host
+
+        Arguments:
+            account - a cloud account
+            host - a host object
+
+        Returns:
+            An id for the host
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        """Remove a host
+
+        Arguments:
+            account - a cloud account
+            host_id - an id for the host
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        """Return a host
+
+        Arguments:
+            account - a cloud account
+            host_id - an id for host
+
+        Returns:
+            Host info item
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        """Return a list of hosts
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of hosts
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def _fill_connection_point_info(c_point, port_info):
+        """Create a GI object for RwcalYang.VDUInfoParams_ConnectionPoints()
+
+        Converts Port information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            port_info - Port information from openstack
+        Returns:
+            Protobuf Gi object for RwcalYang.VDUInfoParams_ConnectionPoints
+        """
+        c_point.name = port_info['name']
+        c_point.connection_point_id = port_info['id']
+        if ('fixed_ips' in port_info) and (len(port_info['fixed_ips']) >= 1):
+            if 'ip_address' in port_info['fixed_ips'][0]:
+                c_point.ip_address = port_info['fixed_ips'][0]['ip_address']
+        if port_info['status'] == 'ACTIVE':
+            c_point.state = 'active'
+        else:
+            c_point.state = 'inactive'
+        if 'network_id' in port_info:
+            c_point.virtual_link_id = port_info['network_id']
+        if ('device_id' in port_info) and (port_info['device_id']):
+            c_point.vdu_id = port_info['device_id']
+
+    @staticmethod
+    def _fill_virtual_link_info(network_info, port_list, subnet):
+        """Create a GI object for VirtualLinkInfoParams
+
+        Converts Network and Port information dictionary object
+        returned by openstack driver into Protobuf Gi Object
+
+        Arguments:
+            network_info - Network information from openstack
+            port_list - A list of port information from openstack
+            subnet: Subnet information from openstack
+        Returns:
+            Protobuf Gi object for VirtualLinkInfoParams
+        """
+        link = RwcalYang.VirtualLinkInfoParams()
+        link.name  = network_info['name']
+        if network_info['status'] == 'ACTIVE':
+            link.state = 'active'
+        else:
+            link.state = 'inactive'
+        link.virtual_link_id = network_info['id']
+        for port in port_list:
+            if port['device_owner'] == 'compute:None':
+                c_point = link.connection_points.add()
+                RwcalOpenstackPlugin._fill_connection_point_info(c_point, port)
+
+        if subnet != None:
+            link.subnet = subnet['cidr']
+
+        if ('provider:network_type' in network_info) and (network_info['provider:network_type'] != None):
+            link.provider_network.overlay_type = network_info['provider:network_type'].upper()
+        if ('provider:segmentation_id' in network_info) and (network_info['provider:segmentation_id']):
+            link.provider_network.segmentation_id = network_info['provider:segmentation_id']
+        if ('provider:physical_network' in network_info) and (network_info['provider:physical_network']):
+            link.provider_network.physical_network = network_info['provider:physical_network'].upper()
+
+        return link
+
+    @staticmethod
+    def _fill_vdu_info(vm_info, flavor_info, mgmt_network, port_list, server_group):
+        """Create a GI object for VDUInfoParams
+
+        Converts VM information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            vm_info - VM information from openstack
+            flavor_info - VM Flavor information from openstack
+            mgmt_network - Management network
+            port_list - A list of port information from openstack
+            server_group - A list (with one element or empty list) of server group to which this VM belongs
+        Returns:
+            Protobuf Gi object for VDUInfoParams
+        """
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.name = vm_info['name']
+        vdu.vdu_id = vm_info['id']
+        for network_name, network_info in vm_info['addresses'].items():
+            if network_info and network_name == mgmt_network:
+                for interface in network_info:
+                    if 'OS-EXT-IPS:type' in interface:
+                        if interface['OS-EXT-IPS:type'] == 'fixed':
+                            vdu.management_ip = interface['addr']
+                        elif interface['OS-EXT-IPS:type'] == 'floating':
+                            vdu.public_ip = interface['addr']
+
+        # Look for any metadata
+        for key, value in vm_info['metadata'].items():
+            if key == 'node_id':
+                vdu.node_id = value
+        if ('image' in vm_info) and ('id' in vm_info['image']):
+            vdu.image_id = vm_info['image']['id']
+        if ('flavor' in vm_info) and ('id' in vm_info['flavor']):
+            vdu.flavor_id = vm_info['flavor']['id']
+
+        if vm_info['status'] == 'ACTIVE':
+            vdu.state = 'active'
+        elif vm_info['status'] == 'ERROR':
+            vdu.state = 'failed'
+        else:
+            vdu.state = 'inactive'
+
+        if 'availability_zone' in vm_info:
+            vdu.availability_zone = vm_info['availability_zone']
+
+        if server_group:
+            vdu.server_group.name = server_group[0]
+
+        vdu.cloud_type  = 'openstack'
+        # Fill the port information
+        for port in port_list:
+            c_point = vdu.connection_points.add()
+            RwcalOpenstackPlugin._fill_connection_point_info(c_point, port)
+
+        if flavor_info is not None:
+            RwcalOpenstackPlugin._fill_epa_attributes(vdu, flavor_info)
+        return vdu
+
    @rwcalstatus(ret_on_failure=[""])
    def do_create_virtual_link(self, account, link_params):
        """Create a new virtual link (neutron network plus one subnet).

        Arguments:
            account     - a cloud account
            link_params - information that defines the type of VDU to create

        Returns:
            The id of the created network
        """
        # Base network arguments; provider attributes are added below only
        # when explicitly requested by link_params.
        kwargs = {}
        kwargs['name']            = link_params.name
        kwargs['admin_state_up']  = True
        kwargs['external_router'] = False
        kwargs['shared']          = False

        if link_params.has_field('provider_network'):
            if link_params.provider_network.has_field('physical_network'):
                kwargs['physical_network'] = link_params.provider_network.physical_network
            if link_params.provider_network.has_field('overlay_type'):
                kwargs['network_type'] = link_params.provider_network.overlay_type.lower()
            if link_params.provider_network.has_field('segmentation_id'):
                kwargs['segmentation_id'] = link_params.provider_network.segmentation_id


        with self._use_driver(account) as drv:
            try:
                network_id = drv.neutron_network_create(**kwargs)
            except Exception as e:
                self.log.error("Encountered exceptions during network creation. Exception: %s", str(e))
                raise

            # Subnet defaults: DHCP on, no gateway -- both may be overridden
            # by the ip-profile fields handled below.
            kwargs = {'network_id' : network_id,
                      'dhcp_params': {'enable_dhcp': True},
                      'gateway_ip' : None,}

            # IP version defaults to IPv4 unless the profile says 'ipv6'.
            if link_params.ip_profile_params.has_field('ip_version'):
                kwargs['ip_version'] = 6 if link_params.ip_profile_params.ip_version == 'ipv6' else 4
            else:
                kwargs['ip_version'] = 4

            # Subnet prefix precedence: explicit CIDR from the ip-profile,
            # then a named subnet pool, then the legacy link_params.subnet.
            if link_params.ip_profile_params.has_field('subnet_address'):
                kwargs['cidr'] = link_params.ip_profile_params.subnet_address
            elif link_params.ip_profile_params.has_field('subnet_prefix_pool'):
                # NOTE(review): driver method name is 'netruon_subnetpool_by_name'
                # (sic) -- presumably the typo is mirrored in the driver; confirm.
                subnet_pool = drv.netruon_subnetpool_by_name(link_params.ip_profile_params.subnet_prefix_pool)
                if subnet_pool is None:
                    self.log.error("Could not find subnet pool with name :%s to be used for network: %s",
                                   link_params.ip_profile_params.subnet_prefix_pool,
                                   link_params.name)
                    raise NeutronException.NotFound("SubnetPool with name %s not found"%(link_params.ip_profile_params.subnet_prefix_pool))

                kwargs['subnetpool_id'] = subnet_pool['id']
            elif link_params.has_field('subnet'):
                kwargs['cidr'] = link_params.subnet
            else:
                assert 0, "No IP Prefix or Pool name specified"

            # Optional DHCP overrides from the ip-profile.
            if link_params.ip_profile_params.has_field('dhcp_params'):
                if link_params.ip_profile_params.dhcp_params.has_field('enabled'):
                    kwargs['dhcp_params']['enable_dhcp'] = link_params.ip_profile_params.dhcp_params.enabled
                if link_params.ip_profile_params.dhcp_params.has_field('start_address'):
                    kwargs['dhcp_params']['start_address']  = link_params.ip_profile_params.dhcp_params.start_address
                if link_params.ip_profile_params.dhcp_params.has_field('count'):
                    kwargs['dhcp_params']['count']  = link_params.ip_profile_params.dhcp_params.count

            if link_params.ip_profile_params.has_field('dns_server'):
                kwargs['dns_server'] = []
                for server in link_params.ip_profile_params.dns_server:
                    kwargs['dns_server'].append(server)

            if link_params.ip_profile_params.has_field('gateway_address'):
                kwargs['gateway_ip'] = link_params.ip_profile_params.gateway_address

            drv.neutron_subnet_create(**kwargs)

        return network_id
+
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        """Delete a virtual link
+
+        Arguments:
+            account - a cloud account
+            link_id - id for the virtual-link to be deleted
+
+        Returns:
+            None
+        """
+        if not link_id:
+            self.log.error("Empty link_id during the virtual link deletion")
+            raise Exception("Empty link_id during the virtual link deletion")
+
+        with self._use_driver(account) as drv:
+            port_list = drv.neutron_port_list(**{'network_id': link_id})
+
+        for port in port_list:
+            if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
+                self.do_delete_port(account, port['id'], no_rwstatus=True)
+        self.do_delete_network(account, link_id, no_rwstatus=True)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        """Get information about virtual link.
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+
+        Returns:
+            Object of type RwcalYang.VirtualLinkInfoParams
+        """
+        if not link_id:
+            self.log.error("Empty link_id during the virtual link get request")
+            raise Exception("Empty link_id during the virtual link get request")
+
+        with self._use_driver(account) as drv:
+            network = drv.neutron_network_get(link_id)
+            if network:
+                port_list = drv.neutron_port_list(**{'network_id': network['id']})
+                if 'subnets' in network:
+                    subnet = drv.neutron_subnet_get(network['subnets'][0])
+                else:
+                    subnet = None
+                virtual_link = RwcalOpenstackPlugin._fill_virtual_link_info(network, port_list, subnet)
+            else:
+                virtual_link = None
+            return virtual_link
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link_list(self, account):
+        """Get information about all the virtual links
+
+        Arguments:
+            account  - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VirtualLinkInfoParams
+        """
+        vnf_resources = RwcalYang.VNFResources()
+        with self._use_driver(account) as drv:
+            networks = drv.neutron_network_list()
+            for network in networks:
+                port_list = drv.neutron_port_list(**{'network_id': network['id']})
+                if ('subnets' in network) and (network['subnets']):
+                    subnet = drv.neutron_subnet_get(network['subnets'][0])
+                else:
+                    subnet = None
+                virtual_link = RwcalOpenstackPlugin._fill_virtual_link_info(network, port_list, subnet)
+                vnf_resources.virtual_link_info_list.append(virtual_link)
+            return vnf_resources
+
+    def _create_connection_point(self, account, c_point):
+        """
+        Create a connection point
+        Arguments:
+           account  - a cloud account
+           c_point  - connection_points
+        """
+        kwargs = {}
+        kwargs['name'] = c_point.name
+        kwargs['network_id'] = c_point.virtual_link_id
+        kwargs['admin_state_up'] = True
+
+        if c_point.type_yang == 'VIRTIO':
+            kwargs['port_type'] = 'normal'
+        elif c_point.type_yang == 'SR_IOV':
+            kwargs['port_type'] = 'direct'
+        else:
+            raise NotImplementedError("Port Type: %s not supported" %(c_point.port_type))
+
+        with self._use_driver(account) as drv:
+            if c_point.has_field('security_group'):
+                group = drv.neutron_security_group_by_name(c_point.security_group)
+                if group is not None:
+                    kwargs['security_groups'] = [group['id']]
+            return drv.neutron_port_create(**kwargs)
+
+    def _allocate_floating_ip(self, drv, pool_name):
+        """
+        Allocate a floating_ip. If unused floating_ip exists then its reused.
+        Arguments:
+          drv:       OpenstackDriver instance
+          pool_name: Floating IP pool name
+
+        Returns:
+          An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        """
+
+        # available_ip = [ ip for ip in drv.nova_floating_ip_list() if ip.instance_id == None ]
+
+        # if pool_name is not None:
+        #     ### Filter further based on IP address
+        #     available_ip = [ ip for ip in available_ip if ip.pool == pool_name ]
+
+        # if not available_ip:
+        #     floating_ip = drv.nova_floating_ip_create(pool_name)
+        # else:
+        #     floating_ip = available_ip[0]
+
+        floating_ip = drv.nova_floating_ip_create(pool_name)
+        return floating_ip
+
+    def _match_vm_flavor(self, required, available):
+        self.log.info("Matching VM Flavor attributes")
+        if available.vcpu_count != required.vcpu_count:
+            self.log.debug("VCPU requirement mismatch. Required: %d, Available: %d",
+                            required.vcpu_count,
+                            available.vcpu_count)
+            return False
+        if available.memory_mb != required.memory_mb:
+            self.log.debug("Memory requirement mismatch. Required: %d MB, Available: %d MB",
+                            required.memory_mb,
+                            available.memory_mb)
+            return False
+        if available.storage_gb != required.storage_gb:
+            self.log.debug("Storage requirement mismatch. Required: %d GB, Available: %d GB",
+                            required.storage_gb,
+                            available.storage_gb)
+            return False
+        self.log.debug("VM Flavor match found")
+        return True
+
    def _match_guest_epa(self, required, available):
        """Match guest EPA (Enhanced Platform Awareness) attributes.

        The policy for every field is symmetric: if a field is required
        it must be present and equal in the available flavor; if it is
        NOT required, the available flavor must not carry it either.
        Returns True on a full match, False otherwise. The first
        mismatch found is logged at debug level.

        Arguments:
            required  - requested guest EPA attributes (protobuf-style
                        object exposing has_field())
            available - guest EPA attributes of a candidate flavor
        """
        self.log.info("Matching Guest EPA attributes")
        # --- pcie_device: every required (device_id, count) pair must
        #     appear somewhere in the available device list ---
        if required.has_field('pcie_device'):
            self.log.debug("Matching pcie_device")
            if available.has_field('pcie_device') == False:
                self.log.debug("Matching pcie_device failed. Not available in flavor")
                return False
            else:
                for dev in required.pcie_device:
                    if not [ d for d in available.pcie_device
                             if ((d.device_id == dev.device_id) and (d.count == dev.count)) ]:
                        self.log.debug("Matching pcie_device failed. Required: %s, Available: %s", required.pcie_device, available.pcie_device)
                        return False
        elif available.has_field('pcie_device'):
            self.log.debug("Rejecting available flavor because pcie_device not required but available")
            return False

        # --- mempage_size: exact value match ---
        if required.has_field('mempage_size'):
            self.log.debug("Matching mempage_size")
            if available.has_field('mempage_size') == False:
                self.log.debug("Matching mempage_size failed. Not available in flavor")
                return False
            else:
                if required.mempage_size != available.mempage_size:
                    self.log.debug("Matching mempage_size failed. Required: %s, Available: %s", required.mempage_size, available.mempage_size)
                    return False
        elif available.has_field('mempage_size'):
            self.log.debug("Rejecting available flavor because mempage_size not required but available")
            return False

        # --- cpu_pinning_policy: a required value of 'ANY' matches
        #     anything; other values must be present and equal ---
        if required.has_field('cpu_pinning_policy'):
            self.log.debug("Matching cpu_pinning_policy")
            if required.cpu_pinning_policy != 'ANY':
                if available.has_field('cpu_pinning_policy') == False:
                    self.log.debug("Matching cpu_pinning_policy failed. Not available in flavor")
                    return False
                else:
                    if required.cpu_pinning_policy != available.cpu_pinning_policy:
                        self.log.debug("Matching cpu_pinning_policy failed. Required: %s, Available: %s", required.cpu_pinning_policy, available.cpu_pinning_policy)
                        return False
        elif available.has_field('cpu_pinning_policy'):
            self.log.debug("Rejecting available flavor because cpu_pinning_policy not required but available")
            return False

        # --- cpu_thread_pinning_policy: exact value match ---
        if required.has_field('cpu_thread_pinning_policy'):
            self.log.debug("Matching cpu_thread_pinning_policy")
            if available.has_field('cpu_thread_pinning_policy') == False:
                self.log.debug("Matching cpu_thread_pinning_policy failed. Not available in flavor")
                return False
            else:
                if required.cpu_thread_pinning_policy != available.cpu_thread_pinning_policy:
                    self.log.debug("Matching cpu_thread_pinning_policy failed. Required: %s, Available: %s", required.cpu_thread_pinning_policy, available.cpu_thread_pinning_policy)
                    return False
        elif available.has_field('cpu_thread_pinning_policy'):
            self.log.debug("Rejecting available flavor because cpu_thread_pinning_policy not required but available")
            return False

        # --- trusted_execution: only enforced when required is True ---
        if required.has_field('trusted_execution'):
            self.log.debug("Matching trusted_execution")
            if required.trusted_execution == True:
                if available.has_field('trusted_execution') == False:
                    self.log.debug("Matching trusted_execution failed. Not available in flavor")
                    return False
                else:
                    if required.trusted_execution != available.trusted_execution:
                        self.log.debug("Matching trusted_execution failed. Required: %s, Available: %s", required.trusted_execution, available.trusted_execution)
                        return False
        elif available.has_field('trusted_execution'):
            self.log.debug("Rejecting available flavor because trusted_execution not required but available")
            return False

        # --- numa_node_policy: node count, memory policy and every
        #     per-node (id, vcpu, memory_mb) triple must all match ---
        if required.has_field('numa_node_policy'):
            self.log.debug("Matching numa_node_policy")
            if available.has_field('numa_node_policy') == False:
                self.log.debug("Matching numa_node_policy failed. Not available in flavor")
                return False
            else:
                if required.numa_node_policy.has_field('node_cnt'):
                    self.log.debug("Matching numa_node_policy node_cnt")
                    if available.numa_node_policy.has_field('node_cnt') == False:
                        self.log.debug("Matching numa_node_policy node_cnt failed. Not available in flavor")
                        return False
                    else:
                        if required.numa_node_policy.node_cnt != available.numa_node_policy.node_cnt:
                            self.log.debug("Matching numa_node_policy node_cnt failed. Required: %s, Available: %s",required.numa_node_policy.node_cnt, available.numa_node_policy.node_cnt)
                            return False
                elif available.numa_node_policy.has_field('node_cnt'):
                    self.log.debug("Rejecting available flavor because numa node count not required but available")
                    return False

                if required.numa_node_policy.has_field('mem_policy'):
                    self.log.debug("Matching numa_node_policy mem_policy")
                    if available.numa_node_policy.has_field('mem_policy') == False:
                        self.log.debug("Matching numa_node_policy mem_policy failed. Not available in flavor")
                        return False
                    else:
                        if required.numa_node_policy.mem_policy != available.numa_node_policy.mem_policy:
                            self.log.debug("Matching numa_node_policy mem_policy failed. Required: %s, Available: %s", required.numa_node_policy.mem_policy, available.numa_node_policy.mem_policy)
                            return False
                elif available.numa_node_policy.has_field('mem_policy'):
                    self.log.debug("Rejecting available flavor because num node mem_policy not required but available")
                    return False

                if required.numa_node_policy.has_field('node'):
                    self.log.debug("Matching numa_node_policy nodes configuration")
                    if available.numa_node_policy.has_field('node') == False:
                        self.log.debug("Matching numa_node_policy nodes configuration failed. Not available in flavor")
                        return False
                    for required_node in required.numa_node_policy.node:
                        self.log.debug("Matching numa_node_policy nodes configuration for node %s", required_node)
                        numa_match = False
                        # A required node matches only when id, vcpu and
                        # memory_mb all agree with some available node;
                        # each rejected candidate is logged and skipped.
                        for available_node in available.numa_node_policy.node:
                            if required_node.id != available_node.id:
                                self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
                                continue
                            if required_node.vcpu != available_node.vcpu:
                                self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
                                continue
                            if required_node.memory_mb != available_node.memory_mb:
                                self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
                                continue
                            numa_match = True
                        if numa_match == False:
                            return False
                elif available.numa_node_policy.has_field('node'):
                    self.log.debug("Rejecting available flavor because numa nodes not required but available")
                    return False
        elif available.has_field('numa_node_policy'):
            self.log.debug("Rejecting available flavor because numa_node_policy not required but available")
            return False
        self.log.info("Successful match for Guest EPA attributes")
        return True
+
+    def _match_vswitch_epa(self, required, available):
+        self.log.debug("VSwitch EPA match found")
+        return True
+
+    def _match_hypervisor_epa(self, required, available):
+        self.log.debug("Hypervisor EPA match found")
+        return True
+
    def _match_host_epa(self, required, available):
        """Match host EPA attributes against a candidate flavor.

        Same symmetric policy as _match_guest_epa: every required field
        must be present and equal in the flavor, and the flavor must not
        carry a field that was not required. For cpu_model, cpu_arch and
        cpu_vendor, any 'PREFER' prefix in the requirement is rewritten
        to 'REQUIRE' before comparison, because flavors only ever store
        REQUIRE attributes. Returns True on a full match, False otherwise.

        Arguments:
            required  - requested host EPA attributes
            available - host EPA attributes of a candidate flavor
        """
        self.log.info("Matching Host EPA attributes")
        # --- cpu_model (PREFER normalized to REQUIRE) ---
        if required.has_field('cpu_model'):
            self.log.debug("Matching CPU model")
            if available.has_field('cpu_model') == False:
                self.log.debug("Matching CPU model failed. Not available in flavor")
                return False
            else:
                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
                if required.cpu_model.replace('PREFER', 'REQUIRE') != available.cpu_model:
                    self.log.debug("Matching CPU model failed. Required: %s, Available: %s", required.cpu_model, available.cpu_model)
                    return False
        elif available.has_field('cpu_model'):
            self.log.debug("Rejecting available flavor because cpu_model not required but available")
            return False

        # --- cpu_arch (PREFER normalized to REQUIRE) ---
        if required.has_field('cpu_arch'):
            self.log.debug("Matching CPU architecture")
            if available.has_field('cpu_arch') == False:
                self.log.debug("Matching CPU architecture failed. Not available in flavor")
                return False
            else:
                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
                if required.cpu_arch.replace('PREFER', 'REQUIRE') != available.cpu_arch:
                    self.log.debug("Matching CPU architecture failed. Required: %s, Available: %s", required.cpu_arch, available.cpu_arch)
                    return False
        elif available.has_field('cpu_arch'):
            self.log.debug("Rejecting available flavor because cpu_arch not required but available")
            return False

        # --- cpu_vendor (PREFER normalized to REQUIRE) ---
        if required.has_field('cpu_vendor'):
            self.log.debug("Matching CPU vendor")
            if available.has_field('cpu_vendor') == False:
                self.log.debug("Matching CPU vendor failed. Not available in flavor")
                return False
            else:
                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
                if required.cpu_vendor.replace('PREFER', 'REQUIRE') != available.cpu_vendor:
                    self.log.debug("Matching CPU vendor failed. Required: %s, Available: %s", required.cpu_vendor, available.cpu_vendor)
                    return False
        elif available.has_field('cpu_vendor'):
            self.log.debug("Rejecting available flavor because cpu_vendor not required but available")
            return False

        # --- cpu_socket_count: exact value match ---
        if required.has_field('cpu_socket_count'):
            self.log.debug("Matching CPU socket count")
            if available.has_field('cpu_socket_count') == False:
                self.log.debug("Matching CPU socket count failed. Not available in flavor")
                return False
            else:
                if required.cpu_socket_count != available.cpu_socket_count:
                    self.log.debug("Matching CPU socket count failed. Required: %s, Available: %s", required.cpu_socket_count, available.cpu_socket_count)
                    return False
        elif available.has_field('cpu_socket_count'):
            self.log.debug("Rejecting available flavor because cpu_socket_count not required but available")
            return False

        # --- cpu_core_count: exact value match ---
        if required.has_field('cpu_core_count'):
            self.log.debug("Matching CPU core count")
            if available.has_field('cpu_core_count') == False:
                self.log.debug("Matching CPU core count failed. Not available in flavor")
                return False
            else:
                if required.cpu_core_count != available.cpu_core_count:
                    self.log.debug("Matching CPU core count failed. Required: %s, Available: %s", required.cpu_core_count, available.cpu_core_count)
                    return False
        elif available.has_field('cpu_core_count'):
            self.log.debug("Rejecting available flavor because cpu_core_count not required but available")
            return False

        # --- cpu_core_thread_count: exact value match ---
        if required.has_field('cpu_core_thread_count'):
            self.log.debug("Matching CPU core thread count")
            if available.has_field('cpu_core_thread_count') == False:
                self.log.debug("Matching CPU core thread count failed. Not available in flavor")
                return False
            else:
                if required.cpu_core_thread_count != available.cpu_core_thread_count:
                    self.log.debug("Matching CPU core thread count failed. Required: %s, Available: %s", required.cpu_core_thread_count, available.cpu_core_thread_count)
                    return False
        elif available.has_field('cpu_core_thread_count'):
            self.log.debug("Rejecting available flavor because cpu_core_thread_count not required but available")
            return False

        # --- cpu_feature: every required feature must be present in the
        #     flavor's feature list ---
        if required.has_field('cpu_feature'):
            self.log.debug("Matching CPU feature list")
            if available.has_field('cpu_feature') == False:
                self.log.debug("Matching CPU feature list failed. Not available in flavor")
                return False
            else:
                for feature in required.cpu_feature:
                    if feature not in available.cpu_feature:
                        self.log.debug("Matching CPU feature list failed. Required feature: %s is not present. Available features: %s", feature, available.cpu_feature)
                        return False
        elif available.has_field('cpu_feature'):
            self.log.debug("Rejecting available flavor because cpu_feature not required but available")
            return False
        self.log.info("Successful match for Host EPA attributes")            
        return True
+
+
+    def _match_placement_group_inputs(self, required, available):
+        self.log.info("Matching Host aggregate attributes")
+        
+        if not required and not available:
+            # Host aggregate not required and not available => success
+            self.log.info("Successful match for Host Aggregate attributes")
+            return True
+        if required and available:
+            # Host aggregate requested and available => Do a match and decide
+            xx = [ x.as_dict() for x in required ]
+            yy = [ y.as_dict() for y in available ]
+            for i in xx:
+                if i not in yy:
+                    self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
+                    return False
+            self.log.info("Successful match for Host Aggregate attributes")
+            return True
+        else:
+            # Either of following conditions => Failure
+            #  - Host aggregate required but not available
+            #  - Host aggregate not required but available
+            self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
+            return False
+                    
+    def match_epa_params(self, resource_info, request_params):
+        result = self._match_vm_flavor(getattr(request_params, 'vm_flavor'),
+                                       getattr(resource_info, 'vm_flavor'))
+        if result == False:
+            self.log.debug("VM Flavor mismatched")
+            return False
+
+        result = self._match_guest_epa(getattr(request_params, 'guest_epa'),
+                                       getattr(resource_info, 'guest_epa'))
+        if result == False:
+            self.log.debug("Guest EPA mismatched")
+            return False
+
+        result = self._match_vswitch_epa(getattr(request_params, 'vswitch_epa'),
+                                         getattr(resource_info, 'vswitch_epa'))
+        if result == False:
+            self.log.debug("Vswitch EPA mismatched")
+            return False
+
+        result = self._match_hypervisor_epa(getattr(request_params, 'hypervisor_epa'),
+                                            getattr(resource_info, 'hypervisor_epa'))
+        if result == False:
+            self.log.debug("Hypervisor EPA mismatched")
+            return False
+
+        result = self._match_host_epa(getattr(request_params, 'host_epa'),
+                                      getattr(resource_info, 'host_epa'))
+        if result == False:
+            self.log.debug("Host EPA mismatched")
+            return False
+
+        result = self._match_placement_group_inputs(getattr(request_params, 'host_aggregate'),
+                                                    getattr(resource_info, 'host_aggregate'))
+
+        if result == False:
+            self.log.debug("Host Aggregate mismatched")
+            return False
+        
+        return True
+
    def _select_resource_flavor(self, account, vdu_init):
        """Select an existing flavor matching the VDU's EPA requirements,
        or dynamically create a new one.

        Arguments:
            account  - a cloud account
            vdu_init - VDU request carrying the EPA attribute sections

        Returns:
            The flavor-id to use for the VDU.

        Raises:
            OpenstackCALOperationFailure when the flavor list cannot be
            read, when no flavor matches and dynamic flavor creation is
            disabled for the account, or when flavor creation fails.
        """
        # Pre-build a flavor object from the request's EPA sections so it
        # can be submitted as-is if no existing flavor matches below.
        flavor = RwcalYang.FlavorInfoItem()
        flavor.name = str(uuid.uuid4())
        epa_types = ['vm_flavor', 'guest_epa', 'host_epa', 'host_aggregate', 'hypervisor_epa', 'vswitch_epa']
        epa_dict = {k: v for k, v in vdu_init.as_dict().items() if k in epa_types}
        flavor.from_dict(epa_dict)
 
        rc, response = self.do_get_flavor_list(account)
        if rc != RwTypes.RwStatus.SUCCESS:
            self.log.error("Get-flavor-info-list operation failed for cloud account: %s",
                        account.name)
            raise OpenstackCALOperationFailure("Get-flavor-info-list operation failed for cloud account: %s" %(account.name))

        flavor_id = None
        flavor_list = response.flavorinfo_list
        self.log.debug("Received %d flavor information from RW.CAL", len(flavor_list))
        # First flavor whose EPA attributes fully match wins.
        for flv in flavor_list:
            self.log.info("Attempting to match compute requirement for VDU: %s with flavor %s",
                       vdu_init.name, flv)
            if self.match_epa_params(flv, vdu_init):
                self.log.info("Flavor match found for compute requirements for VDU: %s with flavor name: %s, flavor-id: %s",
                           vdu_init.name, flv.name, flv.id)
                return flv.id

        # No existing flavor matched: create one, but only when the account
        # allows dynamic flavor creation.
        if account.openstack.dynamic_flavor_support is False:
            self.log.error("Unable to create flavor for compute requirement for VDU: %s. VDU instantiation failed", vdu_init.name)
            raise OpenstackCALOperationFailure("No resource available with matching EPA attributes")
        else:
            rc,flavor_id = self.do_create_flavor(account,flavor)
            if rc != RwTypes.RwStatus.SUCCESS:
                self.log.error("Create-flavor operation failed for cloud account: %s",
                        account.name)
                raise OpenstackCALOperationFailure("Create-flavor operation failed for cloud account: %s" %(account.name))
            return flavor_id
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        """Create a new virtual deployment unit
+
+        Arguments:
+            account     - a cloud account
+            vdu_init  - information about VDU to create (RwcalYang.VDUInitParams)
+
+        Returns:
+            The vdu_id
+        """
+        ### First create required number of ports aka connection points
+        with self._use_driver(account) as drv:
+            ### If floating_ip is required and we don't have one, better fail before any further allocation
+            if vdu_init.has_field('allocate_public_address') and vdu_init.allocate_public_address:
+                if account.openstack.has_field('floating_ip_pool'):
+                    pool_name = account.openstack.floating_ip_pool
+                else:
+                    pool_name = None
+                floating_ip = self._allocate_floating_ip(drv, pool_name)
+            else:
+                floating_ip = None
+
+        port_list = []
+        network_list = []
+        for c_point in vdu_init.connection_points:
+            if c_point.virtual_link_id in network_list:
+                assert False, "Only one port per network supported. Refer: http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/nfv-multiple-if-1-net.html"
+            else:
+                network_list.append(c_point.virtual_link_id)
+            port_id = self._create_connection_point(account, c_point)
+            port_list.append(port_id)
+
+        if not vdu_init.has_field('flavor_id'):
+            vdu_init.flavor_id = self._select_resource_flavor(account,vdu_init)
+
+        with self._use_driver(account) as drv:
+            ### Now Create VM
+            vm           = RwcalYang.VMInfoItem()
+            vm.vm_name   = vdu_init.name
+            vm.flavor_id = vdu_init.flavor_id
+            vm.image_id  = vdu_init.image_id
+            vm_network   = vm.network_list.add()
+            vm_network.network_id = drv._mgmt_network_id
+            if vdu_init.has_field('vdu_init') and vdu_init.vdu_init.has_field('userdata'):
+                vm.cloud_init.userdata = vdu_init.vdu_init.userdata
+
+            if vdu_init.has_field('node_id'):
+                vm.user_tags.node_id   = vdu_init.node_id;
+
+            if vdu_init.has_field('availability_zone') and vdu_init.availability_zone.has_field('name'):
+                vm.availability_zone = vdu_init.availability_zone.name
+
+            if vdu_init.has_field('server_group'):
+                ### Get list of server group in openstack for name->id mapping
+                openstack_group_list = drv.nova_server_group_list()
+                group_id = [ i['id'] for i in openstack_group_list if i['name'] == vdu_init.server_group.name]
+                if len(group_id) != 1:
+                    raise OpenstackServerGroupError("VM placement failed. Server Group %s not found in openstack. Available groups" %(vdu_init.server_group.name, [i['name'] for i in openstack_group_list]))
+                vm.server_group = group_id[0]
+
+            for port_id in port_list:
+                port = vm.port_list.add()
+                port.port_id = port_id
+
+            pci_assignement = self.prepare_vpci_metadata(drv, vdu_init)
+            if pci_assignement != '':
+                vm.user_tags.pci_assignement = pci_assignement
+
+            vm_id = self.do_create_vm(account, vm, no_rwstatus=True)
+            self.prepare_vdu_on_boot(account, vm_id, floating_ip)
+            return vm_id
+
+    def prepare_vpci_metadata(self, drv, vdu_init):
+        pci_assignement = ''
+        ### TEF specific metadata creation for
+        virtio_vpci = []
+        sriov_vpci = []
+        virtio_meta = ''
+        sriov_meta = ''
+        ### For MGMT interface
+        if vdu_init.has_field('mgmt_vpci'):
+            xx = 'u\''+ drv._mgmt_network_id + '\' :[[u\'' + vdu_init.mgmt_vpci + '\', ' + '\'\']]'
+            virtio_vpci.append(xx)
+
+        for c_point in vdu_init.connection_points:
+            if c_point.has_field('vpci'):
+                if c_point.has_field('vpci') and c_point.type_yang == 'VIRTIO':
+                    xx = 'u\''+c_point.virtual_link_id + '\' :[[u\'' + c_point.vpci + '\', ' + '\'\']]'
+                    virtio_vpci.append(xx)
+                elif c_point.has_field('vpci') and c_point.type_yang == 'SR_IOV':
+                    xx = '[u\'' + c_point.vpci + '\', ' + '\'\']'
+                    sriov_vpci.append(xx)
+
+        if virtio_vpci:
+            virtio_meta += ','.join(virtio_vpci)
+
+        if sriov_vpci:
+            sriov_meta = 'u\'VF\': ['
+            sriov_meta += ','.join(sriov_vpci)
+            sriov_meta += ']'
+
+        if virtio_meta != '':
+            pci_assignement +=  virtio_meta
+            pci_assignement += ','
+
+        if sriov_meta != '':
+            pci_assignement +=  sriov_meta
+
+        if pci_assignement != '':
+            pci_assignement = '{' + pci_assignement + '}'
+
+        return pci_assignement
+
+
+
    def prepare_vdu_on_boot(self, account, server_id, floating_ip):
        """Launch the out-of-band prepare_vm helper for a newly booted VDU.

        Arguments:
            account     - a cloud account (supplies auth parameters)
            server_id   - id of the nova server to prepare
            floating_ip - floating IP object to associate, or None

        Returns:
            None. The helper runs via the shell; its exit status is not
            checked.
        """
        cmd = PREPARE_VM_CMD.format(auth_url     = account.openstack.auth_url,
                                    username     = account.openstack.key,
                                    password     = account.openstack.secret,
                                    tenant_name  = account.openstack.tenant,
                                    mgmt_network = account.openstack.mgmt_network,
                                    server_id    = server_id)

        if floating_ip is not None:
            cmd += (" --floating_ip "+ floating_ip.ip)

        # The helper script lives next to the openstack driver module.
        exec_path = 'python3 ' + os.path.dirname(openstack_drv.__file__)
        exec_cmd = exec_path+'/'+cmd
        # NOTE(review): shell=True with credentials interpolated into the
        # command exposes the password in the process list and is
        # injection-prone if any account field contains shell
        # metacharacters — consider subprocess with an argument list.
        self.log.info("Running command: %s" %(exec_cmd))
        subprocess.call(exec_cmd, shell=True)
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Modify Properties of existing virtual deployment unit
+
+        Arguments:
+            account     -  a cloud account
+            vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
+        """
+        ### First create required number of ports aka connection points
+        port_list = []
+        network_list = []
+        for c_point in vdu_modify.connection_points_add:
+            if c_point.virtual_link_id in network_list:
+                assert False, "Only one port per network supported. Refer: http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/nfv-multiple-if-1-net.html"
+            else:
+                network_list.append(c_point.virtual_link_id)
+            port_id = self._create_connection_point(account, c_point)
+            port_list.append(port_id)
+
+        ### Now add the ports to VM
+        for port_id in port_list:
+            with self._use_driver(account) as drv:
+                drv.nova_server_add_port(vdu_modify.vdu_id, port_id)
+
+        ### Delete the requested connection_points
+        for c_point in vdu_modify.connection_points_remove:
+            self.do_delete_port(account, c_point.connection_point_id, no_rwstatus=True)
+
+        if vdu_modify.has_field('image_id'):
+            with self._use_driver(account) as drv:
+                drv.nova_server_rebuild(vdu_modify.vdu_id, vdu_modify.image_id)
+
+
    @rwstatus
    def do_delete_vdu(self, account, vdu_id):
        """Delete a virtual deployment unit

        Arguments:
            account - a cloud account
            vdu_id  - id for the vdu to be deleted

        Returns:
            None
        """
        # Best-effort guard: an empty id is logged and the call is a no-op.
        if not vdu_id:
            self.log.error("empty vdu_id during the vdu deletion")
            return

        with self._use_driver(account) as drv:
            ### Get list of floating_ips associated with this instance and delete them
            floating_ips = [ f for f in drv.nova_floating_ip_list() if f.instance_id == vdu_id ]
            for f in floating_ips:
                # NOTE(review): reaches into the inner nova_drv handle while
                # the rest of this file uses drv.nova_* wrappers — confirm
                # floating_ip_delete is not exposed on the wrapper.
                drv.nova_drv.floating_ip_delete(f)

            ### Get list of port on VM and delete them.
            port_list = drv.neutron_port_list(**{'device_id': vdu_id})

        # Only ports owned by the compute service (or unowned) are removed;
        # e.g. DHCP-owned ports are left alone.
        for port in port_list:
            if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
                self.do_delete_port(account, port['id'], no_rwstatus=True)

        self.do_delete_vm(account, vdu_id, no_rwstatus=True)
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        """Get information about a virtual deployment unit.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu
+
+        Returns:
+            Object of type RwcalYang.VDUInfoParams
+        """
+        with self._use_driver(account) as drv:
+
+            ### Get list of ports excluding the one for management network
+            port_list = [p for p in drv.neutron_port_list(**{'device_id': vdu_id}) if p['network_id'] != drv.get_mgmt_network_id()]
+
+            vm = drv.nova_server_get(vdu_id)
+
+            # Flavor lookup is best-effort: on failure we log and continue
+            # with flavor_info = None rather than failing the whole query.
+            flavor_info = None
+            if ('flavor' in vm) and ('id' in vm['flavor']):
+                try:
+                    flavor_info = drv.nova_flavor_get(vm['flavor']['id'])
+                except Exception as e:
+                    self.log.critical("Exception encountered while attempting to get flavor info for flavor_id: %s. Exception: %s" %(vm['flavor']['id'], str(e)))
+
+            # Resolve the names of any server (affinity) groups this VM is in.
+            openstack_group_list = drv.nova_server_group_list()
+            server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
+            vdu_info = RwcalOpenstackPlugin._fill_vdu_info(vm,
+                                                           flavor_info,
+                                                           account.openstack.mgmt_network,
+                                                           port_list,
+                                                           server_group)
+            # Console URL is only queried for active VMs, and any failure to
+            # fetch it is deliberately ignored (console info is optional).
+            if vdu_info.state == 'active':
+                try:
+                    console_info = drv.nova_server_console(vdu_info.vdu_id)
+                except Exception as e:
+                    pass
+                else:
+                    vdu_info.console_url = console_info['console']['url']
+                    pass
+
+            return vdu_info
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu_list(self, account):
+        """Get information about all the virtual deployment units
+
+        Arguments:
+            account     - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VDUInfoParams
+        """
+        vnf_resources = RwcalYang.VNFResources()
+        with self._use_driver(account) as drv:
+            vms = drv.nova_server_list()
+            for vm in vms:
+                ### Get list of ports excluding one for management network
+                port_list = [p for p in drv.neutron_port_list(**{'device_id': vm['id']}) if p['network_id'] != drv.get_mgmt_network_id()]
+
+                flavor_info = None
+
+                if ('flavor' in vm) and ('id' in vm['flavor']):
+                    try:
+                        flavor_info = drv.nova_flavor_get(vm['flavor']['id'])
+                    except Exception as e:
+                        self.log.critical("Exception encountered while attempting to get flavor info for flavor_id: %s. Exception: %s" %(vm['flavor']['id'], str(e)))
+
+                else:
+                    flavor_info = None
+
+                openstack_group_list = drv.nova_server_group_list()
+                server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
+
+                vdu = RwcalOpenstackPlugin._fill_vdu_info(vm,
+                                                          flavor_info,
+                                                          account.openstack.mgmt_network,
+                                                          port_list,
+                                                          server_group)
+                if vdu.state == 'active':
+                    try:
+                        console_info = drv.nova_server_console(vdu.vdu_id)
+                    except Exception as e:
+                        pass
+                    else:
+                        vdu.console_url = console_info['console']['url']
+                        pass
+                vnf_resources.vdu_info_list.append(vdu)
+            return vnf_resources
+
+
diff --git a/rwcal/plugins/vala/rwcal_vsphere/CMakeLists.txt b/rwcal/plugins/vala/rwcal_vsphere/CMakeLists.txt
new file mode 100644
index 0000000..092d941
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_vsphere/CMakeLists.txt
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# rift_plugin provides the rift_install_python_plugin() helper used below.
+include(rift_plugin)
+
+# Register and install rwcal_vsphere.py as the Python implementation of the
+# rwcal_vsphere CAL plugin.
+rift_install_python_plugin(rwcal_vsphere rwcal_vsphere.py)
diff --git a/rwcal/plugins/vala/rwcal_vsphere/Makefile b/rwcal/plugins/vala/rwcal_vsphere/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_vsphere/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_vsphere/rift/vsphere/vsphere.py b/rwcal/plugins/vala/rwcal_vsphere/rift/vsphere/vsphere.py
new file mode 100644
index 0000000..e726ea3
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_vsphere/rift/vsphere/vsphere.py
@@ -0,0 +1,86 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import libcloud.compute.providers
+import libcloud.compute.types
+from libcloud.compute.base import Node
+
+from gi.repository import RwcalYang
+
+
+from . import core
+
+
+class Vsphere(core.Cloud):
+    """This class implements the abstract methods in the Cloud class.
+    This is the Vsphere CAL driver."""
+
+    def __init__(self):
+        super(Vsphere, self).__init__()
+        self._driver_class = libcloud.compute.providers.get_driver(
+                libcloud.compute.providers.Provider.VSPHERE)
+
+    def driver(self, account):
+        return self._driver_class(
+                username=account.username,
+                passwork=account.password,
+                url=url,
+                )
+
+    def get_image_list(self, account):
+        """
+        Return a list of the names of all available images.
+        """
+        images = self.driver(account).list_images()
+        return [image.name for image in images]
+
+    def create_vm(self, account, vminfo):
+        """
+        Create a new virtual machine.
+
+        @param account  - account information used authenticate the create of
+                          the virtual machine 
+        @param vmfinfo  - information about the virtual machine to create
+
+        """
+        node = self.driver(account).ex_create_node_from_template(
+                name=vminfo.vm_name,
+                template=vminfo.vsphere.template,
+                )
+
+        vminfo.vm_id = node.id
+
+        return node.id
+
+    def delete_vm(self, account, vm_id):
+        """
+        delete a virtual machine.
+
+        @param vm_id     - Instance id of VM to be deleted.
+        """
+        node = Node()
+        node.id = vm_id
+        self.driver(account).destroy_node(node)
+
+    def reboot_vm(self, account, vm_id):
+        """
+        Reboot a virtual machine.
+
+        @param vm_id     - Instance id of VM to be deleted.
+        """
+        node = Node()
+        node.id = vm_id
+        self.driver(account).reboot_node(node)
diff --git a/rwcal/plugins/vala/rwcal_vsphere/rwcal_vsphere.py b/rwcal/plugins/vala/rwcal_vsphere/rwcal_vsphere.py
new file mode 100644
index 0000000..2dcbd8c
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_vsphere/rwcal_vsphere.py
@@ -0,0 +1,238 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.vsphere')
+
+class UnknownAccountError(Exception):
+    """Mapped to RwStatus.NOTFOUND by the rwstatus decorator below."""
+    pass
+
+
+class MissingFileError(Exception):
+    """Mapped to RwStatus.NOTFOUND by the rwstatus decorator below."""
+    pass
+
+
+class ImageLocationError(Exception):
+    # NOTE(review): not present in the exception map below — confirm whether
+    # it should map to a status or intentionally propagate.
+    pass
+
+
+# Build the @rwstatus decorator: methods decorated with it translate these
+# exception types into the given RwStatus codes instead of raising.
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class RwcalVspherePlugin(GObject.Object, RwCal.Cloud):
+    """This class implements the CAL VALA methods for Vsphere.
+    """
+    # NOTE(review): apart from do_init, every CAL operation below is an
+    # unimplemented stub raising NotImplementedError; the @rwstatus decorator
+    # converts the exception into a failure status (returning the decorator's
+    # ret_on_failure payload where one is given).
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        # Attach a RwLogger handler (category rw-cal-log / vsphere) exactly
+        # once, so repeated do_init calls do not duplicate log output.
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="vsphere",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+            
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        raise NotImplementedError()
+
+    
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        raise NotImplementedError()
+    
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        raise NotImplementedError()        
+            
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        raise NotImplementedError()
+    
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        raise NotImplementedError()        
+    
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[""])
+    def do_get_virtual_link_list(self, account):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        raise NotImplementedError()            
+    
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        raise NotImplementedError()
+    
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_vdu_list(self, account):
+        raise NotImplementedError()        
diff --git a/rwcal/plugins/yang/CMakeLists.txt b/rwcal/plugins/yang/CMakeLists.txt
new file mode 100644
index 0000000..a1b24fe
--- /dev/null
+++ b/rwcal/plugins/yang/CMakeLists.txt
@@ -0,0 +1,46 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# 
+
+##
+# Parse the yang files
+##
+
+include(rift_yang)
+
+set(source_yang_files rwcal.yang)
+
+# Generate the rw-cal-log logging yang model (event IDs starting at 63000);
+# the path of the generated file is returned in rw_cal_log_file.
+rift_generate_python_log_yang(
+    LOG_CATEGORY_NAME rw-cal-log
+    START_EVENT_ID 63000
+    OUT_YANG_FILE_VAR rw_cal_log_file
+    )
+
+# Compile rwcal.yang plus the generated logging model into the rwcal_yang
+# target, linking against the yang/log runtime libraries.
+rift_add_yang_target(
+  TARGET rwcal_yang
+  YANG_FILES
+    ${source_yang_files}
+    ${rw_cal_log_file}
+  COMPONENT ${PKG_LONG_NAME}
+  DEPENDS
+    mano-types_yang
+  LIBRARIES
+    rwschema_yang_gen
+    rwyang
+    rwlog
+    rwlog-mgmt_yang_gen
+    mano-types_yang_gen
+)
diff --git a/rwcal/plugins/yang/Makefile b/rwcal/plugins/yang/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwcal/plugins/yang/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/yang/rwcal.yang b/rwcal/plugins/yang/rwcal.yang
new file mode 100644
index 0000000..53caade
--- /dev/null
+++ b/rwcal/plugins/yang/rwcal.yang
@@ -0,0 +1,1226 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rwcal
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rwcal";
+  prefix "rwcal";
+
+  import rw-base {
+    prefix rwbase;
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-log {
+    prefix "rwlog";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2014-12-30 {
+    description
+        "Initial revision.";
+    reference
+        "RIFT RWCAL cloud data";
+  }
+
+
+  typedef connection-status {
+    description "Connection status for the cloud account";
+    type enumeration {
+      enum unknown;
+      enum validating;
+      enum success;
+      enum failure;
+    }
+  }
+
+  typedef disk-format {
+    type enumeration {
+      enum ami;
+      enum ari;
+      enum aki;
+      enum vhd;
+      enum vmdk;
+      enum raw;
+      enum qcow2;
+      enum vdi;
+      enum iso;
+    }
+  }
+
+  typedef container-format {
+    type enumeration{
+      enum ami;
+      enum ari;
+      enum aki;
+      enum bare;
+      enum ovf;
+    }
+  }
+
+  grouping connection-status {
+    container connection-status {
+      config false;
+      rwpb:msg-new CloudConnectionStatus;
+      leaf status {
+        type connection-status;
+      }
+      leaf details {
+        type string;
+      }
+    }
+  }
+
+  uses connection-status;
+
+  typedef sdn-account-type {
+    description "SDN account type";
+    type enumeration {
+      enum odl;
+      enum mock;
+      enum sdnsim;
+    }
+  }
+
+  grouping sdn-provider-auth {
+    leaf account-type {
+      type sdn-account-type;
+    }
+
+    choice provider-specific-info {
+      container odl {
+        leaf username {
+          type string {
+            length "1..255";
+          }
+        }
+
+        leaf password {
+          type string {
+            length "1..32";
+          }
+        }
+
+        leaf url {
+          type string {
+            length "1..255";
+          }
+        }
+      }
+      container mock {
+        leaf username {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwsdn_mock";
+        }
+      }
+
+      container sdnsim {
+        leaf username {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwsdn_sim";
+        }
+      }
+    }
+  }
+
+  grouping provider-auth {
+    leaf account-type {
+      type manotypes:cloud-account-type;
+    }
+
+    choice provider-specific-info {
+      container mock {
+        leaf username {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwcal_mock";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+      }
+      container aws {
+        leaf key {
+          type string;
+        }
+
+        leaf secret {
+          type string;
+        }
+
+        leaf region {
+          type string;
+        }
+        leaf vpcid {
+          description "VPC ID to use to instantiate EC2 instances";
+          type string;
+        }
+        leaf ssh-key {
+          description "Key pair name to connect to EC2 instance";
+          type string;
+        }
+        leaf availability-zone {
+          description "Availability zone where EC2 instance should
+              be started";
+          type string;
+        }
+        leaf default-subnet-id {
+          description "Default subnet ID to create network
+              interface at instance creation time";
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwcal_aws";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+      }
+
+      container openstack {
+        leaf key {
+          type string;
+          mandatory true;
+        }
+
+        leaf secret {
+          type string;
+          mandatory true;
+        }
+
+        leaf auth_url {
+          type string;
+          mandatory true;
+        }
+
+        leaf tenant {
+          type string;
+          mandatory true;
+        }
+
+        leaf admin {
+          type boolean;
+          default false;
+        }
+
+        leaf mgmt-network {
+          type string;
+          mandatory true;
+        }
+
+        leaf plugin-name {
+          type string;
+          default "rwcal_openstack";
+        }
+
+        leaf-list security-groups {
+          type string;
+          description "Names of the security groups for the VM";
+        }
+        
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+
+        leaf floating-ip-pool {
+          type string;
+          description "Name of floating IP pool to use for floating IP address assignment";
+        }
+
+        leaf cert-validate {
+          type boolean;
+          default false;
+          description "Certificate validation policy in case of SSL/TLS connection";
+        }
+        
+      }
+
+      container openmano {
+        leaf host {
+          type string;
+          default "localhost";
+        }
+
+        leaf port {
+          type uint16;
+          default 9090;
+        }
+
+        leaf tenant-id {
+          type string {
+            length "36";
+          }
+          mandatory true;
+        }
+
+        leaf plugin-name {
+          type string;
+          default "rwcal_openmano";
+        }
+      }
+
+      container vsphere {
+        leaf username {
+          type string;
+        }
+
+        leaf password {
+          type string;
+        }
+
+        leaf url {
+          type string;
+        }
+
+        leaf plugin-name {
+          type string;
+          default "rwcal-python";
+        }
+
+        leaf dynamic-flavor-support {
+          type boolean;
+          default false;
+        }
+      }
+
+      container cloudsim {
+        leaf plugin-name {
+          type string;
+          default "rwcal_cloudsim";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+      }
+
+      container cloudsim_proxy {
+        leaf host {
+          type string;
+          default "localhost";
+        }
+        leaf plugin-name {
+          type string;
+          default "rwcal_cloudsimproxy";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+      }
+
+      container openvim {
+        leaf host {
+          type string;
+          mandatory true;
+        }
+        leaf port {
+          type uint16;
+          default 9080;
+        }
+        leaf tenant-name {
+          type string;
+          description "Mandatory parameter to indicate openvim tenant name";
+          mandatory true;
+        }
+        leaf mgmt-network {
+          type string;
+          mandatory true;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwcal_openmano_vimconnector";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+        container image-management {
+          description " Information required for OpenVim image upload operation";
+
+          leaf username {
+            description "Username for host access";
+            type string;
+          }
+          leaf password {
+            description "Password for host access";
+            type string;
+          }
+          leaf image-directory-path {
+            description "Name of the directory on the host where image needs to be copied";
+            type string;
+            default "/opt/VNF/images";
+          }
+        }
+      }
+    }
+  }
+  
+  grouping vm-info-item {
+    leaf vm-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 255;
+      type string;
+    }
+
+    leaf vm-size {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf vm-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf flavor-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf state {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf availability-zone {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf tenant-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf host-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf management-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf public-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf allocate-public-address {
+      rwpb:field-inline "true";
+      description "If this VM should allocate a floating public IP address";
+      type boolean;
+      default false;
+    }
+
+    list private-ip-list {
+      key "ip-address";
+
+      leaf ip-address {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list public-ip-list {
+      key "ip-address";
+
+      leaf ip-address {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list port-list {
+      key "port-id";
+      leaf port-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list network-list {
+      key "network-id";
+      leaf network-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    container cloud-init {
+      leaf userdata {
+        description
+            "The userdata field for cloud-init should contain
+             the contents of the script that cloud-init should
+             invoke when configuring the system. Note that this
+             script is expected to be in the cloud-config format";
+        type string;
+      }
+    }
+
+    container user_tags {
+
+      leaf node-id {
+        type string;
+      }
+
+      leaf pci_assignement {
+        type string;
+      }
+
+      leaf tag1 {
+        type string;
+      }
+    }
+
+    leaf server-group {
+      type string;
+    }
+  }
+
+  grouping image-info-item {
+    leaf id {
+      type string;
+    }
+
+    leaf name {
+      type string;
+    }
+
+    choice image_file {
+      leaf location {
+        description "Image URL location";
+        type string;
+      }
+
+      leaf fileno {
+        description "Image file descriptor";
+        type uint32;
+      }
+    }
+
+    leaf checksum {
+      type string;
+    }
+
+    leaf virtual_size_mbytes {
+      description "Virtual size of the image";
+      type uint64;
+    }
+
+    leaf disk_format {
+      description "Format of the Disk";
+      type disk-format;
+      default "qcow2";
+    }
+
+    leaf container_format {
+      description "Format of the container";
+      type container-format;
+      default "bare";
+    }
+
+    leaf state {
+      description "State of the Image object in CAL";
+      type enumeration {
+        enum active;
+        enum inactive;
+        enum failed;
+        enum unknown;
+      }
+      default "unknown";
+    }
+
+    container user-tags {
+      description "User tags associated with Image";
+      leaf checksum {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+  }
+
+  // Grouping: operational attributes of a virtual network known to the CAL.
+  // The rwpb extensions bound the generated protobuf string fields.
+  grouping network-info-item {
+    leaf network-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf network-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf subnet {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    uses manotypes:provider-network;
+  }
+
+  // Grouping: operational attributes of a port on a virtual network,
+  // including the owning VM and optional backend-specific details.
+  grouping port-info-item {
+    leaf port-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 255;
+      type string;
+    }
+
+    leaf port-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf port-state {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf network-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf ip-address {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf vm-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf port-type {
+      description "Type of the port";
+      type enumeration {
+        enum normal;
+        enum macvtap;
+        enum direct;
+      }
+      default "normal";
+    }
+
+    // Provider-specific extension point; currently only the LXC backend
+    // contributes data (the veth device backing the port).
+    choice provider-specific-info {
+      container lxc {
+        leaf veth-name {
+          type string;
+        }
+      }
+    }
+  }
+
+  // Configured cloud accounts, keyed by name.  Credentials and provider
+  // selection come from the shared provider-auth grouping.
+  container cloud-accounts {
+    list cloud-account-list {
+      rwpb:msg-new CloudAccount;
+      key "name";
+
+      leaf name {
+        type string;
+      }
+      uses provider-auth;
+    }
+  }
+
+  // Operational ("config false") catalog of resources discovered in the
+  // VIM: VMs, images, tenants, users, roles, hosts, networks, ports and
+  // flavors.  Each list is exported as its own rwpb protobuf message.
+  container vim-resources {
+    rwpb:msg-new VimResources;
+    config false;
+
+    list vminfo-list {
+      rwpb:msg-new VMInfoItem;
+      config false;
+      key "vm-id";
+
+      uses vm-info-item;
+    }
+
+    list imageinfo-list {
+      rwpb:msg-new ImageInfoItem;
+      config false;
+      key "id";
+
+      uses image-info-item;
+    }
+
+    list tenantinfo-list {
+      rwpb:msg-new TenantInfoItem;
+      config false;
+      key "tenant-id";
+
+      leaf tenant-name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf tenant-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list userinfo-list {
+      rwpb:msg-new UserInfoItem;
+      config false;
+      key "user-id";
+
+      // review fix: added the missing space before "{" (was "user-name{"),
+      // matching every other leaf statement in this module.
+      leaf user-name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf user-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list roleinfo-list {
+      rwpb:msg-new RoleInfoItem;
+      config false;
+      key "role-id";
+
+      leaf role-name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf role-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list hostinfo-list {
+      rwpb:msg-new HostInfoItem;
+      config false;
+      key "host-id";
+
+      leaf host-name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf host-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list networkinfo-list {
+      rwpb:msg-new NetworkInfoItem;
+      config false;
+      key "network-id";
+
+      uses network-info-item;
+    }
+
+    list portinfo-list {
+      rwpb:msg-new PortInfoItem;
+      config false;
+      key "port-id";
+
+      uses port-info-item;
+    }
+
+    list flavorinfo-list {
+      rwpb:msg-new FlavorInfoItem;
+      config false;
+      key "id";
+
+      leaf id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 255;
+        type string;
+      }
+
+      uses manotypes:vm-flavor;
+      uses manotypes:guest-epa;
+      uses manotypes:vswitch-epa;
+      uses manotypes:hypervisor-epa;
+      uses manotypes:host-epa;
+      uses manotypes:placement-group-input;
+    }
+  }
+
+  // Input parameters for creating a virtual link (network).
+  grouping virtual-link-create-params {
+    leaf name {
+      description "Name of the Virtual-Link";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 255;
+      type string;
+    }
+
+    leaf subnet {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+    leaf associate-public-ip {
+      type boolean;
+      default false;
+    }
+    leaf vim-network-name {
+      description
+          "Name of network in VIM account. This is used to indicate
+          pre-provisioned network name in cloud account.";
+      type string;
+    }
+
+    uses manotypes:provider-network;
+    uses manotypes:ip-profile-info;
+  }
+
+
+  // RPC-style input container wrapping virtual-link-create-params.
+  container virtual-link-req-params {
+    description "This object defines the parameters required to create a virtual-link";
+    rwpb:msg-new VirtualLinkReqParams;
+    uses virtual-link-create-params;
+  }
+
+
+  // Shared enum for the interface technology of a connection point.
+  grouping connection-point-type {
+    leaf type {
+      description
+          "Specifies the type of connection point
+             VIRTIO          : Use the traditional VIRTIO interface.
+             PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface.
+             SR-IOV          : Use SR-IOV interface.";
+      type enumeration {
+        enum VIRTIO;
+        enum PCI-PASSTHROUGH;
+        enum SR-IOV;
+      }
+      default "VIRTIO";
+    }
+  }
+
+
+  // Input parameters for creating a VDU (virtual deployment unit / VM):
+  // image selection, flavor/EPA requirements, connection points and
+  // cloud-init payload.
+  grouping vdu-create-params {
+    leaf name {
+      description "Name of the VDU";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 255;
+      type string;
+    }
+
+    leaf mgmt-vpci {
+      description
+          "Specifies the virtual PCI address. Expressed in
+           the following format dddd:dd:dd.d. For example
+           0000:00:12.0. This information can be used to
+           pass as metadata during the VM creation.";
+      type string;
+    }
+
+    uses manotypes:vm-flavor;
+    uses manotypes:guest-epa;
+    uses manotypes:vswitch-epa;
+    uses manotypes:hypervisor-epa;
+    uses manotypes:host-epa;
+
+    leaf node-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf flavor-id {
+      description "CAL assigned flavor-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-id {
+      description "CAL assigned image-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    // image-name/image-checksum are an alternative to image-id: they let the
+    // CAL look the id up when the caller does not know it.
+    leaf image-name {
+      description "Image name which can be used to lookup the image-id";
+      type string;
+      rwpb:field-inline "true";
+      rwpb:field-string-max 256;
+    }
+
+    leaf image-checksum {
+      description "Image md5sum checksum used in combination with image name to lookup image-id ";
+      type string;
+      rwpb:field-inline "true";
+      rwpb:field-string-max 32;
+    }
+
+    uses manotypes:placement-group-input;
+    
+    list connection-points {
+      key "name";
+      leaf name {
+        description "Name of the connection point";
+        type string;
+      }
+      leaf virtual-link-id {
+        description "CAL assigned resource Id for the Virtual Link";
+        type string;
+      }
+      leaf associate-public-ip {
+        type boolean;
+        default false;
+      }
+      
+      leaf vpci {
+        description
+            "Specifies the virtual PCI address. Expressed in
+             the following format dddd:dd:dd.d. For example
+             0000:00:12.0. This information can be used to
+             pass as metadata during the VM creation.";
+        type string;
+      }
+
+      leaf security-group {
+        description "Name of the security group";
+        type string;
+      }
+
+      uses connection-point-type;
+    }
+
+    leaf allocate-public-address {
+      description "If this VDU needs public IP address";
+      type boolean;
+      default false;
+    }
+
+    container vdu-init {
+      leaf userdata {
+        description
+            "The userdata field for vdu-init should contain
+             the contents of the script that cloud-init should
+             invoke when configuring the system. Note that this
+             script is expected to be in the cloud-config format";
+        type string;
+      }
+    }
+  }
+
+  // RPC-style input container wrapping vdu-create-params.
+  container vdu-init-params {
+    description "This object defines the parameters required to create a VDU";
+    rwpb:msg-new VDUInitParams;
+    uses vdu-create-params;
+  }
+
+  // Input container for modifying an existing VDU: switch its image and/or
+  // add/remove connection points.
+  container vdu-modify-params {
+    description "This object defines the parameters required to modify VDU";
+    rwpb:msg-new VDUModifyParams;
+
+    // NOTE(review): this description looks copy-pasted from the
+    // connection-point grouping -- here the leaf identifies the VDU being
+    // modified, not a connection point's parent.
+    leaf vdu-id {
+      description "CAL assigned id for VDU to which this connection point belongs";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-id {
+      description "CAL assigned image-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    list connection-points-add {
+      key "name";
+      leaf name {
+        description "Name of the connection point";
+        type string;
+      }
+      leaf virtual-link-id {
+        description "CAL assigned resource Id for the Virtual Link";
+        type string;
+      }
+      leaf associate-public-ip {
+        type boolean;
+        default false;
+      }
+
+      uses connection-point-type;
+    }
+
+    list connection-points-remove {
+      key "connection-point-id";
+      leaf connection-point-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+  }
+
+  // Grouping: operational state of a connection point, including the
+  // virtual link and VDU it is attached to and its assigned addresses.
+  grouping connection-point-info-params {
+    leaf connection-point-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf name {
+      description "Name of the connection point";
+      type string;
+    }
+
+    leaf virtual-link-id {
+      description "CAL assigned resource ID of the Virtual-Link";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf vdu-id {
+      description "CAL assigned id for VDU to which this connection point belongs";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    // NOTE(review): unlike the other state enums in this module, this one
+    // declares no default -- confirm whether "unknown" was intended.
+    leaf state {
+      description "CMP agnostic generic state of the connection point";
+      type enumeration {
+        enum active;
+        enum inactive;
+        enum failed;
+        enum unknown;
+      }
+    }
+
+    leaf ip-address {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf public-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+  }
+
+  // Grouping: operational state of a virtual link and the connection
+  // points attached to it.
+  grouping virtual-link-info-params {
+    leaf name {
+      description "Name of the Virtual-Link";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 255;
+      type string;
+    }
+
+    leaf state {
+      description "State of the Virtual Link";
+      type enumeration {
+        enum active;
+        enum inactive;
+        enum failed;
+        enum unknown;
+      }
+      default "unknown";
+    }
+
+    leaf virtual-link-id {
+      description "CAL assigned resource ID of the Virtual-Link";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    list connection-points {
+      key connection-point-id;
+      uses connection-point-info-params;
+    }
+
+    leaf subnet {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    uses manotypes:provider-network;
+
+  }
+
+  // Grouping: operational state of a VDU -- identity, image/flavor,
+  // lifecycle state, addresses, EPA attributes and connection points.
+  grouping vdu-info-params {
+    leaf vdu-id {
+      description "CAL assigned id for VDU";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+    leaf name {
+      description "Name of the VDU";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 255;
+      type string;
+    }
+
+    leaf flavor-id {
+      description "CAL assigned flavor-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-id {
+      description "CAL assigned image-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf node-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf state {
+      description "State of the VDU";
+      type enumeration {
+        enum active;
+        enum inactive;
+        enum failed;
+        enum unknown;
+      }
+      default "unknown";
+    }
+
+    leaf management-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf public-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    uses manotypes:vm-flavor;
+    uses manotypes:guest-epa;
+    uses manotypes:vswitch-epa;
+    uses manotypes:hypervisor-epa;
+    uses manotypes:host-epa;
+    uses manotypes:placement-group-input;
+    
+    list connection-points {
+      key connection-point-id;
+      uses connection-point-info-params;
+    }
+    leaf console-url {
+      type string;
+      description "Console URL from the VIM, if available";
+    }
+  }
+
+  // Operational ("config false") view of the resources the CAL has
+  // created on behalf of VNFs: virtual links and VDUs.
+  container vnf-resources {
+    rwpb:msg-new VNFResources;
+    config false;
+
+    list virtual-link-info-list {
+      rwpb:msg-new VirtualLinkInfoParams;
+      config false;
+      key virtual-link-id;
+      uses virtual-link-info-params;
+    }
+
+    list vdu-info-list {
+      rwpb:msg-new VDUInfoParams;
+      config false;
+      key vdu-id;
+      uses vdu-info-params;
+    }
+  }
+}
+
+/* vim: set ts=2:sw=2: */
diff --git a/rwcal/rift/cal/client.py b/rwcal/rift/cal/client.py
new file mode 100644
index 0000000..4717b0b
--- /dev/null
+++ b/rwcal/rift/cal/client.py
@@ -0,0 +1,68 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file client.py
+@author Varun Prasad(varun.prasad@riftio.com)
+@date 2016-06-14
+"""
+
+import os
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwLog', '1.0')
+
+from gi.repository import RwcalYang
+
+import rift.cal.utils as cal_utils
+
+
+class CloudsimClient(cal_utils.CloudSimCalMixin):
+    """Cloudsim client that handles interactions with the server.
+    """
+    def __init__(self, log):
+        super().__init__()
+        self.log = log
+
+    @property
+    def images(self):
+        _, images = self.cal.get_image_list(self.account)
+        return images.imageinfo_list or []
+
+    @property
+    def vlinks(self):
+        _, vlinks = self.cal.get_virtual_link_list(self.account)
+        return vlinks.virtual_link_info_list or []
+
+    @property
+    def vdus(self):
+        _, vdus = self.cal.get_vdu_list(self.account)
+        return vdus.vdu_info_list or []
+
+    def upload_image(self, location, name=None):
+        """Onboard image to cloudsim server."""
+
+        image = RwcalYang.ImageInfoItem()
+        image.name = name or os.path.basename(location)
+        image.location = location
+        image.disk_format = "qcow2"
+        rc, image.id = self.cal.create_image(self.account, image)
+
+        self.log.info("Image created: {}".format(image.as_dict()))
+
+        return image
diff --git a/rwcal/rift/cal/cloudsim b/rwcal/rift/cal/cloudsim
new file mode 100644
index 0000000..fc2e4dd
--- /dev/null
+++ b/rwcal/rift/cal/cloudsim
@@ -0,0 +1,248 @@
+#!/usr/bin/env python3
+
+import argparse
+import logging
+import os
+import sys
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwLog', '1.0')
+
+import rift.cal.server as cal_server
+import rift.cal.client as cal_client
+import rift.cal.utils as cal_utils
+import rift.rwcal.cloudsim.lxc as lxc
+import rift.rwcal.cloudsim.lvm as lvm
+import rift.rwcal.cloudsim.shell as shell
+
+from prettytable import PrettyTable
+
+
+# Sub-command names.  Each sub-parser stores its own name in `args.which`
+# (via set_defaults) so main() can dispatch on these constants.
+START_PARSER = "start"
+STOP_PARSER = "stop"
+CLEAN_PARSER = "clean"
+FCLEAN_PARSER = "force-clean"
+IMAGE_PARSER = "image-create"
+STATUS_PARSER = "status"
+
+
+class CloudsimOperations(cal_utils.CloudSimCalMixin):
+    """Implements the cloudsim CLI sub-commands.
+
+    Combines CloudsimServerOperations (server lifecycle), CloudsimClient
+    (resource queries) and direct lxc/lvm helpers behind the command line.
+    Methods decorated with @check_and_create_bridge presumably require the
+    cloudsim bridge to exist -- the decorator lives in rift.cal.utils,
+    outside this file.
+    """
+    def __init__(self, args):
+        # `args` is the argparse namespace produced by parse().
+        super().__init__()
+        self.log = cal_utils.Logger(
+                    daemon_mode=False,
+                    log_name="Parser",
+                    log_level=logging.getLevelName(args.log_level)).logger
+
+        self.args = args
+        self.operations = cal_server.CloudsimServerOperations(self.log)
+        self.client = cal_client.CloudsimClient(self.log)
+        self._cal, self._account = None, None
+
+    @property
+    def log_file(self):
+        # Shared server log file path maintained by the Logger utility.
+        return cal_utils.Logger.LOG_FILE
+
+    @cal_utils.check_and_create_bridge
+    def start_server(self):
+        # --foreground keeps the server attached to the console.
+        self.operations.start_server(foreground=self.args.foreground)
+
+    @cal_utils.check_and_create_bridge
+    def stop_server(self):
+        self.operations.stop_server()
+
+    @cal_utils.check_and_create_bridge
+    def clean_resources(self):
+        """Clean all resource using rest APIs. """
+        # Images are only removed when --all was given on the command line.
+        self.operations.clean_server(images=self.args.all)
+
+    @cal_utils.check_and_create_bridge
+    def upload_image(self):
+        """Onboard image to cloudsim server."""
+        self.client.upload_image(self.args.location, name=self.args.name)
+
+    def force_clean_resources(self):
+        """Force clean up all resource. """
+        # Deliberately does NOT require the bridge: this is the recovery
+        # path, run directly against lxc/lvm rather than the server APIs.
+        self.log.info("Cleaning up logs")
+        shell.command("rm -f {}".format(self.log_file))
+
+        self.log.info("Cleaning up PID file")
+        shell.command("rm -f {}".format(self.operations.PID_FILE))
+
+        try:
+            self.log.info("Purging LXC resources")
+            # Stop every container first, then destroy, so containers are
+            # not destroyed while still running.
+            for container in lxc.containers():
+                lxc.stop(container)
+
+            for container in lxc.containers():
+                lxc.destroy(container)
+
+            lvm.destroy('rift')
+
+        except shell.ProcessError:
+            # Best-effort fallback when the orderly teardown fails.
+            self.log.exception("Unable to purge resources. Trying a force clean now.")
+            lxc.force_clean()
+
+    @cal_utils.check_and_create_bridge
+    def show_status(self):
+
+        # Server status table is always printed; resource tables only when
+        # the server is actually running.
+        cld_tbl = PrettyTable(['PID', 'Status', 'Log file'])
+
+        pid = self.operations.pid
+        if pid:
+            cld_tbl.add_row([pid, "RUNNING", self.log_file])
+        else:
+            cld_tbl.add_row(["-", "STOPPED", self.log_file])
+
+        print ("Cloudsim server:")
+        print (cld_tbl)
+
+        if not pid:
+            return
+
+        # Images
+        img_tbl = PrettyTable(['ID', 'Name', 'Format'])
+        vlink_tbl = PrettyTable([
+                'ID', 'Name', 'Bridge Name', 'State', 'Subnet', 'Ports', "IPs"])
+        vdu_tbl = PrettyTable([
+            'ID', 'Name', 'LXC Name', 'IP', 'State', 'Ports', "VLink ID"])
+
+
+        images = self.client.images
+        if images:
+            for image in images:
+                img_tbl.add_row([image.id, image.name, image.disk_format])
+
+            print ("Images:")
+            print (img_tbl)
+
+        vlinks = self.client.vlinks
+        if vlinks:
+            for vlink in vlinks:
+
+                ports, ips = [], []
+                for cp in vlink.connection_points:
+                    ports.append("{} ({})".format(cp.name, cp.connection_point_id))
+                    ips.append(cp.ip_address)
+
+                # The bridge name is the vlink name truncated to 15 chars
+                # (presumably the Linux interface-name limit -- confirm).
+                vlink_tbl.add_row([
+                    vlink.virtual_link_id,
+                    vlink.name,
+                    vlink.name[:15],
+                    vlink.state,
+                    vlink.subnet,
+                    "\n".join(ports),
+                    "\n".join(ips)])
+
+            print ("Vlink:")
+            print (vlink_tbl)
+
+
+        # Map a VDU's public IP back to the owning LXC container name using
+        # the name -> IPs mapping reported by lxc.ls_info().
+        lxc_to_ip = lxc.ls_info()
+        def get_lxc_name(ip):
+            for lxc_name, ips in lxc_to_ip.items():
+                if str(ip) in ips:
+                    return lxc_name
+
+            return ""
+
+        vdus = self.client.vdus
+        if vdus:
+            for vdu in vdus:
+                ports, links = [], []
+                for cp in vdu.connection_points:
+                    ports.append("{} ({})".format(cp.name, cp.ip_address))
+                    links.append(cp.virtual_link_id)
+
+                vdu_tbl.add_row([
+                    vdu.vdu_id, vdu.name, get_lxc_name(vdu.public_ip), vdu.public_ip,
+                    vdu.state, "\n".join(ports), "\n".join(links)])
+
+            print ("VDU:")
+            print (vdu_tbl)
+
+
+def parse(arguments):
+    parser = argparse.ArgumentParser(description=__doc__,
+                                    formatter_class=argparse.RawDescriptionHelpFormatter)
+    parser.add_argument(
+            '--log-level', '-l',
+            default="WARNING",
+            type=str,
+            choices=["INFO", "DEBUG", "WARNING", "ERROR"],
+            help="Set log level, defaults to warning and above.")
+
+    subparsers = parser.add_subparsers()
+
+    start_parser = subparsers.add_parser(START_PARSER, help="Start the server")
+    start_parser.add_argument(
+            '--foreground', "-f",
+            help="Run the server in the foreground. The logs are sent to console.",
+            default=False,
+            action="store_true")
+    start_parser.set_defaults(which=START_PARSER)
+
+    stop_parser = subparsers.add_parser(STOP_PARSER, help="Stop the server")
+    stop_parser.set_defaults(which=STOP_PARSER)
+
+    clean_parser = subparsers.add_parser(
+            CLEAN_PARSER,
+            help="Clean LXC resources. By default all resources except " + \
+                 "images are cleared.")
+    clean_parser.add_argument(
+            '--all', '-a', 
+            help="Cleans up all resources including images",
+            default=False,
+            action="store_true")
+    clean_parser.set_defaults(which=CLEAN_PARSER)
+
+    fclean_parser = subparsers.add_parser(
+            FCLEAN_PARSER,
+            help="Force clean all lxc resources")
+    fclean_parser.set_defaults(which=FCLEAN_PARSER)
+
+    image_parser = subparsers.add_parser(IMAGE_PARSER, help="Upload images")
+    image_parser.add_argument(
+            '--name', '-n',
+            help="(Optional) Name of the image")
+    image_parser.add_argument(
+            '--location', '-l',
+            help="Image location. If name is not specified the basename of " + \
+                 "the image path is used.",
+            required=True)
+    image_parser.set_defaults(which=IMAGE_PARSER)
+
+    show_parser = subparsers.add_parser(
+            STATUS_PARSER,
+            help="Shows the current status of LXC")
+    show_parser.set_defaults(which=STATUS_PARSER)
+
+    args = parser.parse_args(arguments)
+
+    return args
+
+
+def main(args):
+
+    args = parse(args)
+
+    operations = CloudsimOperations(args)
+
+    if args.which == START_PARSER:
+        operations.start_server()
+    elif args.which == STOP_PARSER:
+        operations.stop_server()
+    elif args.which == FCLEAN_PARSER:
+        operations.force_clean_resources()
+    elif args.which == CLEAN_PARSER:
+        operations.clean_resources()
+    elif args.which == IMAGE_PARSER:
+        operations.upload_image()
+    elif args.which == STATUS_PARSER:
+        operations.show_status()
+
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
diff --git a/rwcal/rift/cal/rwcal_status.py b/rwcal/rift/cal/rwcal_status.py
new file mode 100644
index 0000000..6867140
--- /dev/null
+++ b/rwcal/rift/cal/rwcal_status.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+#
+# @file rwcal_status.py
+# @brief This module defines Python utilities for dealing with rwcalstatus codes.
+
+import traceback
+import functools
+import gi
+gi.require_version('RwTypes', '1.0')
+
+from gi.repository import RwTypes, RwCal
+
+def rwcalstatus_from_exc_map(exc_map):
+    """Create an rwcalstatus decorator factory from ``exc_map``.
+
+    ``exc_map`` maps exception types to RwStatus codes.  The returned
+    ``rwstatus`` callable supports three application forms:
+
+        @rwstatus                             -- bare decorator
+        @rwstatus({SomeError: STATUS})        -- add extra exception mappings
+        @rwstatus(ret_on_failure=[...])       -- values to return on failure
+
+    The wrapped function returns a tuple whose first element is an
+    RwCal.RwcalStatus (status / error_msg / traceback) followed by the
+    function's own return value(s).
+    """
+
+    # A decorator that maps a Python exception to a particular return code.
+    # Also returns an object containing the error msg, traceback and rwstatus
+    # Automatically returns RW_SUCCESS when no Python exception was thrown.
+    # Prevents us from having to use try: except: handlers around every function call.
+
+    def rwstatus(arg=None, ret_on_failure=None):
+        def decorator(func):
+            @functools.wraps(func)
+            def wrapper(*args, **kwds):
+                rwcal_status = RwCal.RwcalStatus()
+                try:
+                    ret = func(*args, **kwds)
+
+                except Exception as e:
+                    # Failure path: capture details on the status object.
+                    rwcal_status.traceback = traceback.format_exc()
+                    rwcal_status.error_msg = str(e)
+
+                    # First matching mapping wins; isinstance() also matches
+                    # subclasses of the mapped exception types.
+                    ret_code = [status for exc, status in exc_map.items() if isinstance(e, exc)]
+                    ret_list = [None] if ret_on_failure is None else list(ret_on_failure)
+                    if len(ret_code):
+                        rwcal_status.status = ret_code[0]
+                    else:
+                        # If it was not explicitly mapped, print the full traceback as this
+                        # is not an anticipated error.
+                        traceback.print_exc()
+                        rwcal_status.status = RwTypes.RwStatus.FAILURE
+
+                    ret_list.insert(0, rwcal_status)
+                    return tuple(ret_list)
+
+
+                # Success path: clear error fields and flatten the result
+                # into (status, ...) tuple form.
+                rwcal_status.status = RwTypes.RwStatus.SUCCESS
+                rwcal_status.traceback = ""
+                rwcal_status.error_msg = ""
+                ret_list = [rwcal_status]
+                if ret is not None:
+                    if type(ret) == tuple:
+                        ret_list.extend(ret)
+                    else:
+                        ret_list.append(ret)
+
+                return tuple(ret_list)
+
+            return wrapper
+
+        # Dispatch on how the decorator was applied (see docstring).
+        if isinstance(arg, dict):
+            # NOTE(review): update() mutates the exc_map captured by this
+            # factory, so every later use of the same factory also sees
+            # these extra mappings -- confirm that sharing is intended.
+            exc_map.update(arg)
+            return decorator
+        elif ret_on_failure is not None:
+            return decorator
+        else:
+            # Bare @rwstatus form: `arg` is the decorated function itself.
+            return decorator(arg)
+
+    return rwstatus
diff --git a/rwcal/rift/cal/server/__init__.py b/rwcal/rift/cal/server/__init__.py
new file mode 100644
index 0000000..b81f6c5
--- /dev/null
+++ b/rwcal/rift/cal/server/__init__.py
@@ -0,0 +1,26 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file __init__.py
+@author Austin Cormier(austin.cormier@riftio.com)
+@author Varun Prasad(varun.prasad@riftio.com)
+@date 2016-06-14
+"""
+
+
+from .server import CalServer
+from .operations import CloudsimServerOperations
\ No newline at end of file
diff --git a/rwcal/rift/cal/server/app.py b/rwcal/rift/cal/server/app.py
new file mode 100644
index 0000000..355d653
--- /dev/null
+++ b/rwcal/rift/cal/server/app.py
@@ -0,0 +1,543 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file app.py
+@author Austin Cormier(austin.cormier@riftio.com)
+@author Varun Prasad(varun.prasad@riftio.com)
+@date 2016-06-14
+"""
+
+import asyncio
+import collections
+import concurrent.futures
+import logging
+import sys
+
+import tornado
+import tornado.httpserver
+import tornado.web
+import tornado.platform.asyncio
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwLog', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    RwCal,
+    RwcalYang,
+    RwTypes,
+)
+
+logger = logging.getLogger(__name__)
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
class CalCallFailure(Exception):
    """Raised when a proxied CAL method reports a non-SUCCESS status."""
+
+
class RPCParam(object):
    """Describes one named RPC input or output parameter.

    ``key`` is the JSON field name.  ``proto_type``, when given, names the
    RwcalYang protobuf class used to (de)serialize the value; ``None``
    means the value is passed through as-is.
    """

    def __init__(self, key, proto_type=None):
        self.key, self.proto_type = key, proto_type
+
+
class CalRequestHandler(tornado.web.RequestHandler):
    """Proxies one HTTP POST endpoint to a single CAL plugin method.

    The request body is a JSON object whose fields are described by
    ``input_params``; the response is a JSON object with a ``return_vals``
    list described by ``output_params`` (see RPCParam).
    """

    def initialize(self, log, loop, cal, account, executor, cal_method,
                   input_params=None, output_params=None):
        # Tornado invokes initialize() with the dict supplied at route
        # registration time (see CalProxyApp).
        self.log = log
        self.loop = loop
        self.cal = cal
        self.account = account
        self.executor = executor
        self.cal_method = cal_method
        self.input_params = input_params
        self.output_params = output_params

    def wrap_status_fn(self, fn, *args, **kwargs):
        """Invoke fn and enforce the status-first return convention.

        Returns the remaining return values with the leading status
        stripped.  Raises ValueError if fn's first return value is not a
        status, and CalCallFailure if the CAL call reported a failure.
        """
        ret = fn(*args, **kwargs)
        # collections.Iterable was removed in Python 3.10; the ABCs live
        # in collections.abc.
        if not isinstance(ret, collections.abc.Iterable):
            ret = [ret]

        rw_status = ret[0]

        # RwcalStatus wraps the plain RwStatus together with traceback info.
        if type(rw_status) is RwCal.RwcalStatus:
            rw_status = rw_status.status

        if type(rw_status) != RwTypes.RwStatus:
            raise ValueError("First return value of %s function was not a RwStatus" %
                             fn.__name__)

        if rw_status != RwTypes.RwStatus.SUCCESS:
            msg = "%s returned %s" % (fn.__name__, str(rw_status))
            self.log.error(msg)
            raise CalCallFailure(msg)

        return ret[1:]

    @tornado.gen.coroutine
    def post(self):
        """Handle a POST: decode args, run the CAL call, encode results."""

        def body_to_cal_args():
            # Decode the JSON body into positional CAL arguments,
            # deserializing protobuf-typed parameters along the way.
            cal_args = []
            if self.input_params is None:
                return cal_args

            input_dict = tornado.escape.json_decode(self.request.body)
            if len(input_dict) != len(self.input_params):
                raise ValueError("Got %s parameters, expected %s" %
                                 (len(input_dict), len(self.input_params)))

            for input_param in self.input_params:
                key = input_param.key
                value = input_dict[key]
                proto_type = input_param.proto_type

                if proto_type is not None:
                    proto_cls = getattr(RwcalYang, proto_type)
                    self.log.debug("Deserializing into %s type", proto_cls)
                    value = proto_cls.from_dict(value)

                cal_args.append(value)

            return cal_args

        def cal_return_vals(return_vals):
            # Encode the CAL return values into the JSON response schema.
            output_params = self.output_params
            if output_params is None:
                output_params = []

            if len(return_vals) != len(output_params):
                # The original passed the counts as extra ValueError args,
                # which left the message unformatted; format it explicitly.
                raise ValueError("Got %s return values.  Expected %s" %
                                 (len(return_vals), len(output_params)))

            write_dict = {"return_vals": []}
            for i, output_param in enumerate(output_params):
                key = output_param.key
                proto_type = output_param.proto_type
                output_value = return_vals[i]

                if proto_type is not None:
                    output_value = output_value.as_dict()

                return_val = {
                        "key": key,
                        "value": output_value,
                        "proto_type": proto_type,
                        }

                write_dict["return_vals"].append(return_val)

            return write_dict

        @asyncio.coroutine
        def handle_request():
            self.log.debug("Got cloudsimproxy POST request: %s", self.request.body)
            cal_args = body_to_cal_args()

            # Execute the CAL request in a separate thread to prevent
            # blocking the main loop (CAL is not thread-safe; the shared
            # executor has a single worker).
            return_vals = yield from self.loop.run_in_executor(
                    self.executor,
                    self.wrap_status_fn,
                    getattr(self.cal, self.cal_method),
                    self.account,
                    *cal_args
                    )

            return cal_return_vals(return_vals)

        f = asyncio.ensure_future(handle_request(), loop=self.loop)
        return_dict = yield tornado.platform.asyncio.to_tornado_future(f)

        self.log.debug("Responding to %s RPC with %s", self.cal_method, return_dict)

        self.clear()
        self.set_status(200)
        self.write(return_dict)
+
+
class CalProxyApp(tornado.web.Application):
    """Tornado application exposing each CAL method as a POST endpoint.

    Every route is ``/api/<cal_method>`` and is served by
    CalRequestHandler, configured from the spec table below.
    """

    # One entry per endpoint: (cal_method, input spec, output spec).
    # Each spec is None or a list of (key, proto_type) pairs, where a
    # proto_type of None means a plain (non-protobuf) value.
    _ROUTE_SPECS = [
        ("get_image_list", None, [("images", "VimResources")]),
        ("create_image", [("image", "ImageInfoItem")], [("image_id", None)]),
        ("delete_image", [("image_id", None)], None),
        ("get_image", [("image_id", None)], [("image", "ImageInfoItem")]),
        ("create_vm", [("vm", "VMInfoItem")], [("vm_id", None)]),
        ("start_vm", [("vm_id", None)], None),
        ("stop_vm", [("vm_id", None)], None),
        ("delete_vm", [("vm_id", None)], None),
        ("reboot_vm", [("vm_id", None)], None),
        ("get_vm_list", None, [("vms", "VimResources")]),
        # NOTE(review): output key "vms" for a single VM looks like it was
        # meant to be "vm" — kept as-is since clients may depend on it;
        # confirm against the client code.
        ("get_vm", [("vm_id", None)], [("vms", "VMInfoItem")]),
        ("create_flavor", [("flavor", "FlavorInfoItem")], [("flavor_id", None)]),
        ("delete_flavor", [("flavor_id", None)], None),
        ("get_flavor_list", None, [("flavors", "VimResources")]),
        ("get_flavor", [("flavor_id", None)], [("flavor", "FlavorInfoItem")]),
        ("create_network", [("network", "NetworkInfoItem")], [("network_id", None)]),
        ("delete_network", [("network_id", None)], None),
        ("get_network", [("network_id", None)], [("network", "NetworkInfoItem")]),
        ("get_network_list", None, [("networks", "VimResources")]),
        ("get_management_network", None, [("network", "NetworkInfoItem")]),
        ("create_port", [("port", "PortInfoItem")], [("port_id", None)]),
        ("delete_port", [("port_id", None)], None),
        ("get_port", [("port_id", None)], [("port", "PortInfoItem")]),
        ("get_port_list", None, [("ports", "VimResources")]),
        ("create_virtual_link", [("link_params", "VirtualLinkReqParams")],
         [("link_id", None)]),
        ("delete_virtual_link", [("link_id", None)], None),
        ("get_virtual_link", [("link_id", None)],
         [("response", "VirtualLinkInfoParams")]),
        ("get_virtual_link_list", None, [("resources", "VNFResources")]),
        ("create_vdu", [("vdu_params", "VDUInitParams")], [("vdu_id", None)]),
        ("modify_vdu", [("vdu_params", "VDUModifyParams")], None),
        ("delete_vdu", [("vdu_id", None)], None),
        ("get_vdu", [("vdu_id", None)], [("response", "VDUInfoParams")]),
        ("get_vdu_list", None, [("resources", "VNFResources")]),
    ]

    def __init__(self, log, loop, cal_interface, cal_account):
        self.log = log
        self.loop = loop
        self.cal = cal_interface
        self.account = cal_account

        # Single-worker executor: CAL is not thread-safe, so all calls
        # into it are serialized through one thread, shared by all routes.
        shared_attrs = dict(
            log=self.log,
            loop=self.loop,
            cal=cal_interface,
            account=cal_account,
            executor=concurrent.futures.ThreadPoolExecutor(1),
            )

        def to_params(spec):
            # Convert a (key, proto_type) spec list into RPCParam objects.
            if spec is None:
                return None
            return [RPCParam(key, proto) for key, proto in spec]

        handlers = []
        for method, in_spec, out_spec in self._ROUTE_SPECS:
            attrs = dict(shared_attrs)
            attrs.update(
                cal_method=method,
                input_params=to_params(in_spec),
                output_params=to_params(out_spec),
                )
            handlers.append((r"/api/" + method, CalRequestHandler, attrs))

        super(CalProxyApp, self).__init__(handlers)
diff --git a/rwcal/rift/cal/server/operations.py b/rwcal/rift/cal/server/operations.py
new file mode 100644
index 0000000..316525e
--- /dev/null
+++ b/rwcal/rift/cal/server/operations.py
@@ -0,0 +1,200 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file operations.py
+@author Varun Prasad(varun.prasad@riftio.com)
+@date 2016-06-14
+"""
+
+import daemon
+import daemon.pidfile
+import os
+import signal
+import subprocess
+import sys
+import time
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwLog', '1.0')
+
+from . import server as cal_server
+import rift.cal.utils as cal_util
+import rift.rwcal.cloudsim.shell as shell
+
+
+
class CloudsimServerOperations(cal_util.CloudSimCalMixin):
    """Convenience class to provide start, stop and cleanup operations.

    Attributes:
        log (logging.Logger): Log instance
        PID_FILE (str): Location to generate the PID file.
    """
    PID_FILE = "/var/log/rift/cloudsim_server.pid"

    def __init__(self, log):
        super().__init__()
        self.log = log

    @property
    def pid(self):
        """Return the server PID recorded in PID_FILE, or None.

        Returns None when the file is missing, empty, or does not
        contain an integer on its first line.
        """
        pid = None
        try:
            with open(self.PID_FILE) as fh:
                pid = int(fh.readlines()[0].strip())
        except IndexError:
            self.log.error("Looks like the pid file does not contain a valid ID")
        except ValueError:
            # File exists but its first line is not an integer; the
            # original let this ValueError escape to the caller.
            self.log.error("PID file does not contain a valid integer ID")
        except OSError:
            self.log.debug("No PID file found.")

        return pid

    def is_pid_exists(self, pid):
        """Check whether a process with the given PID is currently alive."""
        try:
            os.kill(pid, 0)  # signal 0: existence probe, nothing delivered
        except ProcessLookupError:
            return False
        except PermissionError:
            # The process exists but is owned by another user; the
            # original misreported this case as "not running".
            return True
        except OSError:
            return False

        return True

    def start_server(self, foreground=False):
        """Start the tornado app.

        Args:
            foreground (bool): Run in the foreground instead of
                daemonizing when True.
        """
        # Before starting verify if all requirements are satisfied
        cal_server.CalServer.verify_requirements(self.log)

        # If the /var/log directory is not present, then create it first.
        if not os.path.exists(os.path.dirname(self.PID_FILE)):
            self.log.warning("Creating /var/log/rift directory for log storage")
            os.makedirs(os.path.dirname(self.PID_FILE))

        # Check if an existing PID file is present, if so check if it has
        # an associated proc, otherwise it's a zombie file so clean it.
        # Otherwise the daemon fails silently.
        if self.pid is not None and not self.is_pid_exists(self.pid):
            self.log.warning("Removing stale PID file")
            os.remove(self.PID_FILE)

        def start(daemon_mode=False):
            # Entry point shared by the foreground and daemonized paths.
            log = cal_util.Logger(daemon_mode=daemon_mode, log_name='')
            log.logger.info("Starting the cloud server.")
            server = cal_server.CalServer()
            server.start()

        if foreground:
            # Write the PID file for consistency
            with open(self.PID_FILE, mode='w') as fh:
                fh.write(str(os.getpid()) + "\n")
            start()
        else:
            context = daemon.DaemonContext(
                pidfile=daemon.pidfile.PIDLockFile(self.PID_FILE))
            with context:
                start(daemon_mode=True)

    def stop_server(self):
        """Stop the daemon: SIGHUP first, escalating to SIGKILL if needed."""

        def kill_pid(pid, sig):
            self.log.info("Sending {} to PID: {}".format(str(sig), pid))
            try:
                os.kill(pid, sig)
            except ProcessLookupError:
                # Process exited between checks; nothing left to signal.
                self.log.debug("Process {} already terminated".format(pid))

        def search_and_kill():
            """In case the PID file is not found, and the server is still
            running, as a last resort we search through the process table
            and stop the server."""
            cmd = ["pgrep", "-u", "daemon,root", "python3"]

            try:
                pids = subprocess.check_output(cmd)
            except subprocess.CalledProcessError:
                self.log.error("No Cloudsim server process found. "
                        "Please ensure Cloudsim server is running")
                return

            pids = map(int, pids.split())

            for pid in pids:
                if pid != os.getpid():
                    kill_sequence(pid)

        def wait_till_exit(pid, timeout=30, retry_interval=1):
            # Poll until the process disappears or the timeout elapses.
            start_time = time.time()

            while True:
                if not self.is_pid_exists(pid):
                    msg = "Killed {}".format(pid)
                    print(msg)
                    return True

                time_elapsed = time.time() - start_time
                time_remaining = timeout - time_elapsed

                self.log.info("Process still exists, trying again in {} sec(s)"
                    .format(retry_interval))

                if time_remaining <= 0:
                    msg = 'Process {} has not yet terminated within {} secs. Trying SIGKILL'
                    self.log.error(msg.format(pid, timeout))
                    return False

                time.sleep(min(time_remaining, retry_interval))

        def kill_sequence(pid):
            # Ask politely first; escalate to SIGKILL only if the process
            # survived the grace period.  (The original always sent
            # SIGKILL, raising ProcessLookupError for already-dead PIDs.)
            kill_pid(pid, signal.SIGHUP)
            status = wait_till_exit(pid, timeout=10, retry_interval=2)

            if not status:
                kill_pid(pid, signal.SIGKILL)
                status = wait_till_exit(pid)

            if status:
                # Remove the lock file.
                shell.command("rm -f {}".format(self.PID_FILE))

        pid = self.pid
        if pid is not None:
            self.log.warning("Server running with PID: {} found, "
                             "trying to stop it".format(pid))
            kill_sequence(pid)
        else:
            self.log.warning("No PID file found. Searching the process "
                            "table to find PID")
            search_and_kill()

    def clean_server(self, images=False):
        """Clean all resources using rest APIs.

        Args:
            images (bool): Also delete uploaded images when True.
        """
        # Delete VDUs
        _, vdus = self.cal.get_vdu_list(self.account)
        for vdu in vdus.vdu_info_list:
            self.cal.delete_vdu(self.account, vdu.vdu_id)

        # Delete Vlinks
        _, vlinks = self.cal.get_virtual_link_list(self.account)
        for vlink in vlinks.virtual_link_info_list:
            self.cal.delete_virtual_link(self.account, vlink.virtual_link_id)

        if images:
            # Use a distinct local name; the original rebound the
            # 'images' parameter here.
            _, image_resources = self.cal.get_image_list(self.account)
            for image in image_resources.image_info_list:
                self.cal.delete_image(self.account, image.id)
diff --git a/rwcal/rift/cal/server/server.py b/rwcal/rift/cal/server/server.py
new file mode 100644
index 0000000..ef8b0d4
--- /dev/null
+++ b/rwcal/rift/cal/server/server.py
@@ -0,0 +1,151 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file cal_server.py
+@author Austin Cormier(austin.cormier@riftio.com)
+@author Varun Prasad(varun.prasad@riftio.com)
+@date 2016-06-14
+"""
+
+import asyncio
+import logging
+import os
+import signal
+import sys
+
+import tornado
+import tornado.httpserver
+import tornado.web
+import tornado.platform.asyncio
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwLog', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    RwcalYang,
+    RwLog
+)
+
+import rw_peas
+import rift.tasklets
+import rift.rwcal.cloudsim.net
+import rift.rwcal.cloudsim.lvm as lvm
+import rift.rwcal.cloudsim.lxc as lxc
+import rift.rwcal.cloudsim.shell as shell
+
+from . import app
+
+logger = logging.getLogger(__name__)
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
class CalServer():
    """HTTP server wiring the cloudsim CAL plugin into a CalProxyApp."""

    HTTP_PORT = 9002
    # Process-wide CAL plugin interface, created lazily on first use.
    cal_interface = None

    @staticmethod
    def verify_requirements(log):
        """
        Check if all the requirements are met
        1. brctl (from bridge-utils) should be installed
        2. The user should be root
        """
        try:
            shell.command('/usr/sbin/brctl show')
        except shell.ProcessError:
            log.exception('/usr/sbin/brctl command not found, please install '
                'bridge-utils (yum install bridge-utils)')
            sys.exit(1)

        if os.geteuid() != 0:
            log.error("User should be root to start the server.")
            sys.exit(1)

    def __init__(self, logging_level=logging.DEBUG):
        self.app = None
        self.server = None
        self.log_hdl = RwLog.Ctx.new("a")
        self.log = logger
        self.log.setLevel(logging_level)

    def get_cal_interface(self):
        """Return the shared CAL plugin interface, creating it on first use."""
        self.log.debug("Creating CAL interface.")
        if CalServer.cal_interface is None:
            plugin = rw_peas.PeasPlugin('rwcal_cloudsim', 'RwCal-1.0')
            engine, info, extension = plugin()

            CalServer.cal_interface = plugin.get_interface("Cloud")
            CalServer.cal_interface.init(self.log_hdl)

        return CalServer.cal_interface

    def cleanup(self):
        """Stop and destroy all LXC containers and remove the LVM store."""
        self.log.info("Cleaning up resources and backing store.")
        for container in lxc.containers():
            self.log.debug("Stopping {}".format(container))
            lxc.stop(container)

        for container in lxc.containers():
            lxc.destroy(container)

        lvm.destroy('rift')

    def start(self):
        """Start the server."""

        cal = self.get_cal_interface()
        account = RwcalYang.CloudAccount(account_type="cloudsim")

        tornado.platform.asyncio.AsyncIOMainLoop().install()
        loop = asyncio.get_event_loop()

        self.app = app.CalProxyApp(self.log, loop, cal, account)
        self.server = tornado.httpserver.HTTPServer(self.app)

        self.log.info("Starting Cal Proxy Http Server on port %s",
                      CalServer.HTTP_PORT)
        self.server.listen(CalServer.HTTP_PORT)

        def startup():
            # One-time host initialization, run off the event loop thread.
            self.log.info("Creating a default network")
            rift.rwcal.cloudsim.net.virsh_initialize_default()
            self.log.info("Creating backing store")
            lvm.create('rift')

        loop.add_signal_handler(signal.SIGHUP, self.cleanup)
        loop.add_signal_handler(signal.SIGTERM, self.cleanup)

        try:
            loop.run_in_executor(None, startup)
            loop.run_forever()
        except KeyboardInterrupt:
            self.cleanup()
        except Exception as exc:
            self.log.exception(exc)

    def stop(self):
        """Stop the HTTP server."""
        try:
            self.server.stop()
        except Exception:
            # The original passed sys.exc_info()[0] without a format
            # placeholder, which broke log message formatting;
            # logger.exception already records the active traceback.
            self.log.exception("Caught Exception in LP stop")
            raise
diff --git a/rwcal/rift/cal/utils.py b/rwcal/rift/cal/utils.py
new file mode 100644
index 0000000..c99bf9d
--- /dev/null
+++ b/rwcal/rift/cal/utils.py
@@ -0,0 +1,123 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file utils.py
+@author Varun Prasad(varun.prasad@riftio.com)
+@date 2016-06-14
+"""
+
+import logging
+import os
+import sys
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwLog', '1.0')
+
+from gi.repository import RwcalYang
+import rift.rwcal.cloudsim.net as net
+import rwlogger
+import rw_peas
+
+
+class Logger():
+    """A wrapper to hold all logging related configuration. """
+    # Destination for file logging when daemon_mode is enabled.
+    LOG_FILE = "/var/log/rift/cloudsim_server.log"
+    FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+
+    def __init__(self, daemon_mode=True, log_name=__name__, log_level=logging.DEBUG):
+        """
+        Args:
+            daemon_mode (bool, optional): If set, then logs are pushed to the
+                    file.
+            log_name (str, optional): Logger name
+            log_level (<Log level>, optional): INFO, DEBUG ..
+        """
+        self.logger = logging.getLogger(log_name)
+        # basicConfig is a no-op once the root logger has handlers, so
+        # constructing several Logger instances won't duplicate console output.
+        logging.basicConfig(level=log_level, format=self.FORMAT)
+
+        if daemon_mode:
+            # NOTE(review): FileHandler raises if /var/log/rift does not
+            # exist or is not writable -- confirm deployment creates it.
+            handler = logging.FileHandler(self.LOG_FILE)
+            handler.setFormatter(logging.Formatter(self.FORMAT))
+            self.logger.addHandler(handler)
+
+
+
+class CloudSimCalMixin(object):
+    """Mixin class to provide cal plugin and account access to classes.
+    """
+
+    def __init__(self):
+        # Both are created lazily by load_plugin() on first property access.
+        self._cal, self._account = None, None
+
+    @property
+    def cal(self):
+        # Cal plugin interface; loads the plugin on first use.
+        if not self._cal:
+            self.load_plugin()
+        return self._cal
+
+    @property
+    def account(self):
+        # Cloud account config; loads the plugin on first use.
+        if not self._account:
+            self.load_plugin()
+        return self._account
+
+    def load_plugin(self):
+        """Load the cloudsim-proxy cal plugin and build its account.
+
+        Populates ``self._cal`` and ``self._account`` in place; it does not
+        return anything (the previous docstring wrongly claimed a tuple).
+        """
+        plugin = rw_peas.PeasPlugin('rwcal_cloudsimproxy', 'RwCal-1.0')
+        engine, info, extension = plugin()
+
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+        cal = plugin.get_interface("Cloud")
+        rc = cal.init(rwloggerctx)
+
+        account = RwcalYang.CloudAccount()
+        account.account_type = "cloudsim_proxy"
+        # NOTE(review): 192.168.122.1 looks like the libvirt default-network
+        # host address -- confirm the proxy server listens there.
+        account.cloudsim_proxy.host = "192.168.122.1"
+
+        self._cal, self._account = cal, account
+
+
+def check_and_create_bridge(func):
+    """Decorator that checks if a bridge is available in the VM, if not checks
+    for permission and tries to create one.
+
+    NOTE(review): consider functools.wraps(func) on func_wrapper so the
+    decorated function keeps its name/docstring.
+    """
+
+    def func_wrapper(*args, **kwargs):
+        logging.debug("Checking if bridge exists")
+
+        # 'virbr0' is the bridge created by libvirt's default network.
+        if net.bridge_exists('virbr0'):
+            logging.debug("Bridge exists, can proceed with further operations.")
+        else:
+            logging.warning("No Bridge exists, trying to create one.")
+
+            # Creating the bridge requires root; exit with a hint otherwise.
+            if os.geteuid() != 0:
+                logging.error("No bridge exists and cannot create one due to "
+                    "insufficient privileges. Please create it manually using "
+                    "'virsh net-start default' or re-run the same command as root.")
+                sys.exit(1)
+
+            net.virsh_initialize_default()
+
+        return func(*args, **kwargs)
+
+    return func_wrapper
+
diff --git a/rwcal/src/CMakeLists.txt b/rwcal/src/CMakeLists.txt
new file mode 100644
index 0000000..9bbe77f
--- /dev/null
+++ b/rwcal/src/CMakeLists.txt
@@ -0,0 +1,38 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 05/22/2014
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+add_definitions(-std=gnu99)
+
+add_library(rwcal_api SHARED
+  rwcal_py.c)
+
+target_link_libraries(rwcal_api PRIVATE
+  rwcal-1.0
+  rwcal_yang_gen
+  rwlib
+  rw_vx_plugin
+  peas-1.0)
+
+add_dependencies(rwcal_api rwmanifest_yang.headers)
+
+install(TARGETS rwcal_api LIBRARY DESTINATION usr/lib COMPONENT ${PKG_LONG_NAME})
+
+install(PROGRAMS rwvim.py DESTINATION usr/bin COMPONENT ${PKG_LONG_NAME})
diff --git a/rwcal/src/Makefile b/rwcal/src/Makefile
new file mode 100644
index 0000000..14f3400
--- /dev/null
+++ b/rwcal/src/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 05/22/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/src/rwcal_py.c b/rwcal/src/rwcal_py.c
new file mode 100644
index 0000000..1b9dbda
--- /dev/null
+++ b/rwcal/src/rwcal_py.c
@@ -0,0 +1,60 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+#include <libpeas/peas.h>
+
+#include "rwcal-api.h"
+
+/* Allocate and initialize a cal module handle.
+ * Returns NULL on allocation or framework-initialization failure. */
+rwcal_module_ptr_t rwcal_module_alloc()
+{
+  rwcal_module_ptr_t rwcal;
+
+  rwcal = (rwcal_module_ptr_t)malloc(sizeof(struct rwcal_module_s));
+  if (!rwcal)
+    return NULL;
+
+  /* Zero every field so rwcal_module_free() can safely test them. */
+  bzero(rwcal, sizeof(struct rwcal_module_s));
+
+  rwcal->framework = rw_vx_framework_alloc();
+  if (!rwcal->framework)
+    goto err;
+
+  rw_vx_require_repository("RwCal", "1.0");
+
+  goto done;
+
+err:
+  /* Frees the partially built module and NULLs rwcal, so NULL is returned. */
+  rwcal_module_free(&rwcal);
+
+done:
+
+  return rwcal;
+}
+
+/* Release a module allocated by rwcal_module_alloc() and NULL the
+ * caller's pointer.  Safe to call with a NULL or already-freed handle,
+ * which lets error paths invoke it unconditionally. */
+void rwcal_module_free(rwcal_module_ptr_t * rwcal)
+{
+  /* Guard against NULL pointer / NULL handle (double-free tolerance). */
+  if (!rwcal || !*rwcal)
+    return;
+
+  if ((*rwcal)->cloud)
+    g_object_unref((*rwcal)->cloud);
+
+  free(*rwcal);
+  *rwcal = NULL;
+
+  return;
+}
diff --git a/rwcal/src/rwvim.py b/rwcal/src/rwvim.py
new file mode 100755
index 0000000..18cf087
--- /dev/null
+++ b/rwcal/src/rwvim.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 07/24/2014
+# 
+
+"""
+This is a skeletal python tool that invokes the rwcal plugin
+to perform cloud operations.
+"""
+
+import argparse
+import os
+import socket
+import sys
+import logging
+
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import GObject, Peas, GLib, GIRepository
+from gi.repository import RwCal, RwTypes
+
+def resource_list_subcommand(rwcloud, cmdargs):
+    status, flavorinfo = rwcloud.get_flavor_list()
+    status, vminfo = rwcloud.get_vm_list()
+    if vminfo is None:
+        return
+
+    hosts = {}
+
+    # no yaml module installed for Python3, hack for now
+    if cmdargs.hostinfo_file_name:
+        with open(cmdargs.hostinfo_file_name, 'r') as f:
+            lines = f.readlines()
+
+        host = None
+        for l in lines:
+            l = l.strip()
+
+            if l == 'hosts:':
+                continue
+
+            if l == '-':
+                if host:
+                    hosts[host['name']] = host
+                    #hosts.append(host)
+                host = {}
+                continue
+
+            k,v = l.split(':')
+            host[k.strip()] = v.strip()
+
+    # Collect the unique Top of Rack (TOR) swithes
+    tors = set(hosts[vm.host_name]['tor'].lower() for vm in vminfo.vminfo_list)
+
+    print("resources:")
+    for vm in vminfo.vminfo_list:
+        _, __, host_ip_list  = socket.gethostbyaddr(vm.host_name)
+
+        print(" -")
+        print("    name: {}".format(vm.vm_name))
+        print("    osid: {}".format(vm.vm_id))
+        print("    host_name: {}".format(vm.host_name))
+        print("    host_ip: {}".format(host_ip_list[0]))
+        controller, scratch = cmdargs.auth_url[7:].split(':')
+        print("    controller: {}".format(controller))
+        print("    tor: {}".format(hosts[vm.host_name]['tor']))
+        print("    image_name: {}".format(vm.image_name))
+        print("    flavor_name: {}".format(vm.flavor_name))
+        print("    availability_zone: {}".format(vm.availability_zone))
+        print("    private_ip_list: {}".format(
+                sorted(v.ip_address for v in vm.private_ip_list)
+        ))
+        # select the 10.0 network for management ip
+        for p in vm.private_ip_list:
+            if p.ip_address.startswith('10.0.'):
+                print("    ip_address: {}".format(p.ip_address))
+                break;
+
+        print("    public_ip_list: {}".format(
+                [v.ip_address for v in vm.public_ip_list]
+        ))
+        for flavor in flavorinfo.flavorinfo_list:
+            if flavor.name == vm.flavor_name:
+                print("    vcpu: {}".format(flavor.vcpus))
+                print("    ram: {}".format(flavor.memory))
+                print("    disk: {}".format(flavor.disk))
+                print("    host_aggregate_list: {}".format(
+                        [a.name for a in flavor.host_aggregate_list]
+                ))
+                print("    pci_passthrough_device_list: {}".format(
+                        [(d.alias,d.count) for d in flavor.pci_passthrough_device_list]
+                ))
+                # Number of openflow switches this resource connects to are the
+                # number of TOR switches for the pool for demos
+                print("    num_openflow_switches: {}".format(len(tors)))
+                # The number of legacy switches are 0 for demos
+                print("    num_legacy_switches: 0")
+                print("    epa_attributes:")
+
+                # HACK: rw_wag* VMs trusted_execution is always TRUE
+                if vm.vm_name.startswith('rw_wag'):
+                    trusted_execution = 'TRUE'
+                else:
+                    trusted_execution = str(flavor.trusted_host_only).upper()
+                print("        trusted_execution: {}".format(trusted_execution))
+                print("        ddio: {}".format(hosts[vm.host_name]['ddio']))
+                print("        cat: {}".format(hosts[vm.host_name]['cat']))
+                print("        ovs_acceleration: {}".format(hosts[vm.host_name]['ovs_acceleration']))
+                print("        mem_page_size: {}".format(flavor.mem_page_size))
+                if flavor.cpu_threads:
+                    print("        cpu_threads: {}".format(flavor.cpu_threads))
+                print("        cpu_pinning_policy: {}".format(flavor.cpu_policy))
+                # print("            numa_policy: {{ node_cnt: {} }}".format(flavor.numa_node_cnt))
+                print("        numa_node_cnt: {}".format(flavor.numa_node_cnt))
+
+                # if any of the PCI passthrough devices are Coleto Creek
+                # set qat to accel
+                qat=False
+                passthrough=False
+                rrc=False
+                for d in flavor.pci_passthrough_device_list:
+                    if 'COLETO' in d.alias:
+                        qat=True
+                        break
+                    elif '10G' in d.alias:
+                        passthrough=True
+                    elif '100G' in d.alias:
+                        passthrough=True
+                        rrc=True
+                # NOTE: The following can break if SRIOV is used
+                # But for the demos 1,2,3 SRIOV is not used
+                # This is updated logic to set the nic to default to Niantic
+                # if 100G is not in the devise list.
+                if rrc:
+                    print("        nic: RRC")
+                else:
+                    print("        nic: NIANTIC")
+
+                if passthrough or hosts[vm.host_name]['ovs_acceleration'].upper() != 'DISABLED':
+                    print("        dpdk_accelerated: TRUE")
+                else:
+                    print("        dpdk_accelerated: FALSE")
+
+                if passthrough:
+                    print("        pci_passthrough: TRUE")
+                else:
+                    print("        pci_passthrough: FALSE")
+
+                if qat:
+                    print("        quick_assist_policy: MANDATORY")
+                else:
+                    print("        quick_assist_policy: NOACCEL")
+
+                break
+    
+def resource_subcommand(rwcloud, cmdargs):
+    """Process the resources subcommand"""
+
+    # 'which' is set by the matching subparser's set_defaults() in main().
+    if cmdargs.which == 'list':
+        resource_list_subcommand(rwcloud, cmdargs)
+
+def vm_list_subcommand(rwcloud, cmdargs):
+    # Print every VM record returned by the cal plugin.
+    status, vminfo = rwcloud.get_vm_list()
+    for vm in vminfo.vminfo_list:
+        print(vm)
+
+def vm_show_subcommand(rwcloud, cmdargs):
+    # Look up and print a single VM by its id.
+    status, vm = rwcloud.get_vm(cmdargs.id)
+    print(vm)
+
+def vm_create_subcommand(cmdargs):
+    # Not implemented yet.
+    pass
+
+def vm_destroy_subcommand(cmdargs):
+    # Not implemented yet.
+    pass
+
+def vm_reboot_subcommand(cmdargs):
+    # Not implemented yet.
+    pass
+
+def vm_start_subcommand(cmdargs):
+    # Not implemented yet.
+    pass
+
+def vm_subcommand(rwcloud, cmdargs):
+    """Process the vm subcommand"""
+
+    if cmdargs.which == 'list':
+        vm_list_subcommand(rwcloud, cmdargs)
+    elif cmdargs.which == 'show':
+        vm_show_subcommand(rwcloud, cmdargs)
+    elif cmdargs.which == 'create':
+        vm_create_subcommand(cmdargs)
+    elif cmdargs.which == 'reboot':
+        vm_reboot_subcommand(cmdargs)
+    elif cmdargs.which == 'start':
+        vm_start_subcommand(cmdargs)
+    elif cmdargs.which == 'destroy':
+        vm_destroy_subcommand(cmdargs)
+
+def flavor_list_subcommand(rwcloud, cmdargs):
+    # Print every flavor record returned by the cal plugin.
+    status, flavorinfo = rwcloud.get_flavor_list()
+    for flavor in flavorinfo.flavorinfo_list:
+        print(flavor)
+
+def flavor_show_subcommand(rwcloud, cmdargs):
+    # Look up and print a single flavor by its id.
+    status, flavor = rwcloud.get_flavor(cmdargs.id)
+    print(flavor)
+
+def flavor_subcommand(rwcloud, cmdargs):
+    """Process the flavor subcommand"""
+
+    if cmdargs.which == 'list':
+        flavor_list_subcommand(rwcloud, cmdargs)
+    elif cmdargs.which == 'show':
+        flavor_show_subcommand(rwcloud, cmdargs)
+
+
+def main(args=sys.argv[1:]):
+    logging.basicConfig(format='RWCAL %(message)s')
+
+    ##
+    # Command line argument specification
+    ##
+    desc="""This tool is used to manage the VMs"""
+    parser = argparse.ArgumentParser(description=desc)
+    subparsers = parser.add_subparsers()
+
+    # ipaddr = socket.gethostbyname(socket.getfqdn())
+    # default_auth_url = 'http://%s:35357/v2.0/tokens' % ipaddr
+    default_auth_url = "http://10.64.1.31:35357/v2.0/tokens"
+
+    parser.add_argument('-t', '--provider-type', dest='provider_type',
+                        type=str, default='OPENSTACK',
+                        help='Cloud provider type (default: %(default)s)')
+    parser.add_argument('-u', '--user-name', dest='user',
+                        type=str, default='demo',
+                        help='User name (default: %(default)s)')
+    parser.add_argument('-p', '--password', dest='passwd',
+                        type=str, default='mypasswd',
+                        help='Password (default: %(default)s)')
+    parser.add_argument('-n', '--tenant-name', dest='tenant',
+                        type=str, default='demo',
+                        help='User name (default: %(default)s)')
+    parser.add_argument('-a', '--auth-url', dest='auth_url',
+                        type=str, default=default_auth_url,
+                        help='Password (default: %(default)s)')
+
+    ##
+    # Subparser for Resources
+    ##
+    resource_parser = subparsers.add_parser('resource')
+    resource_subparsers = resource_parser.add_subparsers()
+
+    # List resource subparser
+    resource_list_parser = resource_subparsers.add_parser('list')
+    resource_list_parser.set_defaults(which='list')
+    resource_list_parser.add_argument('-f', '--hostinfo-file-name', 
+                                  dest='hostinfo_file_name', 
+                                  required=True,
+                                  type=str,
+                                  help='name of the static yaml file containing host information')
+
+    resource_parser.set_defaults(func=resource_subcommand)
+
+    ##
+    # Subparser for Flavor
+    ##
+    flavor_parser = subparsers.add_parser('flavor')
+    flavor_subparsers = flavor_parser.add_subparsers()
+
+    # List flavor subparser
+    flavor_list_parser = flavor_subparsers.add_parser('list')
+    flavor_list_parser.set_defaults(which='list')
+
+    # Show flavor subparser
+    flavor_show_parser = flavor_subparsers.add_parser('show')
+    flavor_show_parser.add_argument('id', type=str)
+    flavor_show_parser.set_defaults(which='show')
+
+    flavor_parser.set_defaults(func=flavor_subcommand)
+
+    ##
+    # Subparser for VM
+    ##
+    vm_parser = subparsers.add_parser('vm')
+    vm_subparsers = vm_parser.add_subparsers()
+
+    # Create VM subparser
+    vm_create_parser = vm_subparsers.add_parser('create')
+    vm_create_parser.add_argument('-c', '--count',
+                                  type=int, default=1,
+                                  help='The number of VMs to launch '
+                                       '(default: %(default)d)')
+    vm_create_parser.add_argument('-i', '--image',
+                                  default='rwopenstack_vm',
+                                  help='Specify the image for the VM')
+    vm_create_parser.add_argument('-n', '--name',
+                                  help='Specify the name of the VM')
+    vm_create_parser.add_argument('-f', '--flavor',
+                                  help='Specify the flavor for the VM')
+    vm_create_parser.add_argument('-R', '--reserve', dest='reserve_new_vms',
+                                  action='store_true', help='reserve any newly created VMs')
+    vm_create_parser.add_argument('-s', '--single', dest='wait_after_create',
+                                  action='store_true', 
+                                  help='wait for each VM to start before creating the next')
+
+    vm_create_parser.set_defaults(which='create')
+
+    # Reboot VM subparser
+    vm_reboot_parser = vm_subparsers.add_parser('reboot')
+    group = vm_reboot_parser.add_mutually_exclusive_group()
+    group.add_argument('-n', '--vm-name', dest='vm_name',
+                       type=str,
+                       help='Specify the name of the VM')
+    group.add_argument('-a', '--reboot-all',
+                       dest='reboot_all', action='store_true',
+                       help='Reboot all VMs')
+    vm_reboot_parser.add_argument('-s', '--sleep', 
+                                  dest='sleep_time', 
+                                  type=int, default=4, 
+                                  help='time in seconds to sleep between reboots')
+    vm_reboot_parser.set_defaults(which='reboot')
+
+    # Destroy VM subparser
+    vm_destroy_parser = vm_subparsers.add_parser('destroy')
+    group = vm_destroy_parser.add_mutually_exclusive_group()
+    group.add_argument('-n', '--vm-name', dest='vm_name',
+                       type=str,
+                       help='Specify the name of the VM (accepts regular expressions)')
+    group.add_argument('-a', '--destroy-all',
+                       dest='destroy_all', action='store_true',
+                       help='Delete all VMs')
+    group.add_argument('-w', '--wait',
+                       dest='wait', action='store_true',
+                       help='destroy all and wait until all VMs have exited')
+    vm_destroy_parser.set_defaults(which='destroy')
+
+    # List VM subparser
+    vm_list_parser = vm_subparsers.add_parser('list')
+    vm_list_parser.set_defaults(which='list')
+    vm_list_parser.add_argument('-i', '--ips_only', dest='ipsonly',
+                                action='store_true',
+                                help='only list IP addresses')
+
+    # Show vm subparser
+    vm_show_parser = vm_subparsers.add_parser('show')
+    vm_show_parser.add_argument('id', type=str)
+    vm_show_parser.set_defaults(which='show')
+    vm_parser.set_defaults(func=vm_subcommand)
+
+    cmdargs = parser.parse_args(args)
+
+    # Open the peas engine
+    engine = Peas.Engine.get_default()
+
+    # Load our plugin proxy into the g_irepository namespace
+    default = GIRepository.Repository.get_default()
+    GIRepository.Repository.require(default, "RwCal", "1.0", 0)
+
+    # Enable python language loader
+    engine.enable_loader("python3");
+
+    # Set the search path for peas engine,
+    # rift-shell sets the PLUGINDIR and GI_TYPELIB_PATH
+    paths = set([])
+    paths = paths.union(os.environ['PLUGINDIR'].split(":"))
+    for path in paths:
+        engine.add_search_path(path, path)
+
+    # Load the rwcal python plugin and create the extension.
+    info = engine.get_plugin_info("rwcal-plugin")
+    if info is None:
+        print("Error loading rwcal-python plugin")
+        sys.exit(1)
+    engine.load_plugin(info)
+    rwcloud = engine.create_extension(info, RwCal.Cloud, None)
+
+    # For now cloud credentials are hard coded
+    if cmdargs.provider_type == 'OPENSTACK':
+        provider_type = RwCal.CloudType.OPENSTACK_AUTH_URL
+    elif cmdargs.provider_type == 'EC2_US_EAST':
+        provider_type = RwCal.CloudType.EC2_US_EAST
+    elif cmdargs.provider_type == 'VSPHERE':
+        provider_type = RwCal.CloudType.VSPHERE
+    else:
+        sys.exit("Cloud provider %s is NOT supported yet" % cmdargs.provider_type)
+
+
+    if not 'RIFT_SHELL' in os.environ:
+        sys.stderr.write("This tool should be run from inside a rift-shell")
+
+    status = rwcloud.init(provider_type, 
+                          cmdargs.user, 
+                          cmdargs.passwd, 
+                          cmdargs.auth_url,
+                          cmdargs.tenant);
+
+    assert status == RwTypes.RwStatus.SUCCESS
+
+    cmdargs.func(rwcloud, cmdargs)
+
+if __name__ == "__main__":
+    main()
+
diff --git a/rwcal/test/CMakeLists.txt b/rwcal/test/CMakeLists.txt
new file mode 100644
index 0000000..79e66c5
--- /dev/null
+++ b/rwcal/test/CMakeLists.txt
@@ -0,0 +1,67 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 06/27/2014
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs cal_module_test)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
+# rift_gtest(unittest_rwcal_cloud
+#   TEST_SRCS rwcal_cloud_gtest.cpp
+#   TEST_LIBS
+#     rwcal_api
+#     rwcal_yang_gen
+# )
+
+rift_gtest(unittest_rwcal_callback
+  TEST_SRCS rwcal_callback_gtest.cpp
+  TEST_LIBS
+    rwcal-1.0
+    rwcal_api
+)
+
+##
+# Add the basic plugin python test
+##
+#rift_py3test(openstack_cal_tests
+#  LONG_UNITTEST_TARGET
+#  TEST_ARGS -m pytest --junit-xml=${RIFT_UNITTEST_DIR}/openstack_cal/unittest.xml #${CMAKE_CURRENT_SOURCE_DIR}/test_rwcal_openstack_pytest.py
+#)
+
+
+add_executable(rwcal_dump rwcal_dump.cpp)
+target_link_libraries(rwcal_dump
+  rwcal_api
+  rwlib
+  rwyang
+  rwcal_yang_gen
+  CoreFoundation
+  glib-2.0
+  protobuf-c
+)
+
+# added for 4.0
+install(
+  FILES 
+    RIFT.ware-ready.py 
+    openstack_resources.py
+  DESTINATION usr/bin
+  COMPONENT ${PKG_LONG_NAME}
+)
+
diff --git a/rwcal/test/RIFT.ware-ready.py b/rwcal/test/RIFT.ware-ready.py
new file mode 100755
index 0000000..1cd69f1
--- /dev/null
+++ b/rwcal/test/RIFT.ware-ready.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import re
+import sys
+from rift.rwcal.openstack.openstack_drv import OpenstackDriver
+
+
+
+def test_openstack(drv):
+    print("checking endpoints")
+    for ep in [ 'compute', 'image', 'network', 'metering' ]: 
+        url = drv.ks_drv.get_service_endpoint(ep, 'publicURL')
+        print("%s: %s" % ( ep, url))
+        if re.search(url, '127.0.0'): 
+            raise Exception("endpoint %s is using a loopback URL: %s" % ( ep, url))
+
+    def verify(name, min, count):
+        if count < min:
+            raise Exception("only %d instances of %s found. Minimum is %d" % ( count, name, min))
+        print("found %d %s" % ( count, name ))
+        
+    verify("images"     , 1, len(drv.glance_image_list()))
+    verify("flavors "    , 1, len(drv.nova_flavor_list()))
+    verify("floating ips "    , 1, len(drv.nova_floating_ip_list()))
+    verify("servers"     , 0, len(drv.nova_server_list()))
+    verify("networks"     , 1, len(drv.neutron_network_list()))
+    verify("subnets"     , 1, len(drv.neutron_subnet_list()))
+    verify("ports"         , 1, len(drv.neutron_port_list()))
+    #verify("ceilometers"     , 1, len(drv.ceilo_meter_list()))
+    
+
+
+if len(sys.argv) != 6:
+    print("ARGS are admin_user admin_password auth_url tenant_name mgmt_network_name")
+    print("e.g. %s pluto mypasswd http://10.95.4.2:5000/v3 demo private" % __file__ )
+    sys.exit(1)
+
+args=tuple(sys.argv[1:6])
+print("Using args \"%s\"" % ",".join(args))
+
+try:
+    v3 = OpenstackDriver(*args)
+except Exception as e:
+    print("\n\nunable to instantiate a endpoint: %s" % e)
+else:
+    print("\n\n endpoint instantiated")
+    try:
+        test_openstack(v3)
+    except Exception as e:
+        print("\n\nendpoint verification failed: %s" % e)
+    else:
+        print("\n\nSUCCESS! openstack is working")
+        sys.exit(0)
+
+
+
+sys.exit(1)
+
+
+# need to check if any public urls are loopbacks
+# need to check DNS is set up right 
+#    neutron subnet-show private_subnet
+#    host repo.riftio.com  10.64.1.3
+
diff --git a/rwcal/test/aws_resources.py b/rwcal/test/aws_resources.py
new file mode 100644
index 0000000..875de56
--- /dev/null
+++ b/rwcal/test/aws_resources.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import os
+import sys
+import uuid
+import rw_peas
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import argparse
+import logging
+import rwlogger
+import boto3
+import botocore
+
+# Resource names listed here are treated as persistent and are skipped by
+# the _destroy_* cleanup routines below.
+persistent_resources = {
+    'vms'      : [],
+    'networks' : [],
+}
+
+# Well-known VDU names used when creating/destroying the management VMs.
+MISSION_CONTROL_NAME = 'mission-control'
+LAUNCHPAD_NAME = 'launchpad'
+
+# AMI id of the RIFT.io base image used for both the MC and LP VMs.
+RIFT_IMAGE_AMI = 'ami-7070231a'
+
+# Silence everything except this script's own logger.
+logging.basicConfig(level=logging.ERROR)
+logger = logging.getLogger('rift.cal.awsresources')
+logger.setLevel(logging.INFO)
+
+def get_cal_plugin():
+    """
+        Load AWS cal plugin
+
+        Returns the plugin's "Cloud" interface on success.
+        NOTE(review): if cal.init() raises, the error is only logged and the
+        function implicitly returns None -- callers do not check for this.
+    """
+    plugin = rw_peas.PeasPlugin('rwcal_aws', 'RwCal-1.0')
+    engine, info, extension = plugin()
+    cal = plugin.get_interface("Cloud")
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+    try:
+        rc = cal.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except Exception as e:
+        logger.error("ERROR:Cal plugin instantiation failed with exception %s",repr(e))
+    else:
+        logger.info("AWS Cal plugin successfully instantiated")
+        return cal
+
+def get_cal_account(**kwargs):
+    """
+    Returns AWS cal account
+
+    Required kwargs: key, secret, region, availability_zone.
+    Optional kwargs (copied only when present and not None):
+    ssh_key, vpcid, default_subnet_id.
+    """
+    account                        = RwcalYang.CloudAccount()
+    account.account_type           = "aws"
+    account.aws.key = kwargs['key']
+    account.aws.secret = kwargs['secret']
+    account.aws.region = kwargs['region']
+    if 'ssh_key' in kwargs and kwargs['ssh_key'] is not None:
+        account.aws.ssh_key = kwargs['ssh_key']
+    account.aws.availability_zone = kwargs['availability_zone']
+    if 'vpcid' in kwargs and kwargs['vpcid'] is not None: 
+        account.aws.vpcid =  kwargs['vpcid']
+    if 'default_subnet_id' in kwargs and kwargs['default_subnet_id'] is not None:
+        account.aws.default_subnet_id = kwargs['default_subnet_id']
+    return account
+
+class AWSResources(object):
+    """
+    Class with methods to manage AWS resources
+
+    Wraps the rwcal AWS plugin (self._cal) plus a cloud-account config
+    (self._acct); kwargs are forwarded to get_cal_account().
+    """
+    def __init__(self,**kwargs):
+        self._cal      = get_cal_plugin()
+        self._acct     = get_cal_account(**kwargs)
+
+    def _destroy_vms(self):
+        """
+        Destroy VMs
+
+        Deletes every VDU except those named in persistent_resources['vms'].
+        """
+        logger.info("Initiating VM cleanup")
+        rc, rsp = self._cal.get_vdu_list(self._acct)
+        vdu_list = [vm for vm in rsp.vdu_info_list if vm.name not in persistent_resources['vms']]
+        logger.info("Deleting VMs : %s" %([x.name for x in vdu_list]))
+
+        for vdu in vdu_list:
+            self._cal.delete_vdu(self._acct, vdu.vdu_id)
+
+        logger.info("VM cleanup complete")
+
+    def _destroy_networks(self):
+        """
+        Destroy Networks
+
+        Deletes every subnet that is not the default for its AZ.
+        """
+        logger.info("Initiating Network cleanup")
+        driver = self._cal._get_driver(self._acct)
+        subnets = driver.get_subnet_list()
+        subnet_list = [subnet for subnet in subnets if subnet.default_for_az is False]
+
+        # NOTE(review): the log uses x.id while the delete uses
+        # subnet.subnet_id -- confirm both attributes exist on these objects.
+        logger.info("Deleting Networks : %s" %([x.id for x in subnet_list]))
+        for subnet in subnet_list:
+            self._cal.delete_virtual_link(self._acct, subnet.subnet_id)
+        logger.info("Network cleanup complete")
+
+    def destroy_resource(self):
+        """
+        Destroy resources
+
+        Convenience wrapper: VMs first, then networks.
+        """
+        logger.info("Cleaning up AWS resources")
+        self._destroy_vms()
+        self._destroy_networks()
+        logger.info("Cleaning up AWS resources.......[Done]")
+
+    def _destroy_mission_control(self):
+        """
+        Destroy Mission Control VM
+
+        Deletes only VDUs whose name matches MISSION_CONTROL_NAME.
+        """
+        logger.info("Initiating MC VM cleanup")
+        rc, rsp = self._cal.get_vdu_list(self._acct)
+        vdu_list = [vm for vm in rsp.vdu_info_list if vm.name == MISSION_CONTROL_NAME]
+        logger.info("Deleting VMs : %s" %([x.name for x in vdu_list]))
+
+        for vdu in vdu_list:
+            self._cal.delete_vdu(self._acct, vdu.vdu_id)
+        logger.info("MC VM cleanup complete")
+
+    def _destroy_launchpad(self):
+        """
+        Destroy Launchpad VM
+
+        Deletes only VDUs whose name matches LAUNCHPAD_NAME.
+        """
+        logger.info("Initiating LP VM cleanup")
+        rc, rsp = self._cal.get_vdu_list(self._acct)
+        vdu_list = [vm for vm in rsp.vdu_info_list if vm.name == LAUNCHPAD_NAME]
+        logger.info("Deleting VMs : %s" %([x.name for x in vdu_list]))
+
+        for vdu in vdu_list:
+            self._cal.delete_vdu(self._acct, vdu.vdu_id)
+        logger.info("LP VM cleanup complete")
+        
+
+    def create_mission_control(self):
+        """
+        Create Mission Control VM in AWS
+
+        Blocks until the instance is running; stores its id and public /
+        private IPs on self (_mc_id, _mc_public_ip, _mc_private_ip).
+        """ 
+        logger.info("Creating mission control VM")
+        vdu = RwcalYang.VDUInitParams()
+        vdu.name = MISSION_CONTROL_NAME
+        vdu.image_id = RIFT_IMAGE_AMI
+        vdu.flavor_id = 'c3.large'
+        vdu.allocate_public_address = True
+        vdu.vdu_init.userdata = "#cloud-config\n\nruncmd:\n - echo Sleeping for 5 seconds and attempting to start salt-master\n - sleep 5\n - /bin/systemctl restart salt-master.service\n"
+
+        rc,rs=self._cal.create_vdu(self._acct,vdu)
+        assert rc == RwStatus.SUCCESS
+        self._mc_id = rs
+
+        driver = self._cal._get_driver(self._acct)
+        inst=driver.get_instance(self._mc_id)
+        inst.wait_until_running()
+
+        rc,rs =self._cal.get_vdu(self._acct,self._mc_id)
+        assert rc == RwStatus.SUCCESS
+        self._mc_public_ip = rs.public_ip
+        self._mc_private_ip = rs.management_ip
+        
+        logger.info("Started Mission Control VM with id %s and IP Address %s\n",self._mc_id, self._mc_public_ip)
+
+    def create_launchpad_vm(self, salt_master = None):        
+        """
+        Create Launchpad VM in AWS
+        Arguments
+            salt_master (String): String with Salt master IP typically MC VM private IP
+
+        If salt_master is None the MC VM's private IP is used, so
+        create_mission_control() must have been called first in that case.
+        """
+        logger.info("Creating launchpad VM")
+        USERDATA_FILENAME = os.path.join(os.environ['RIFT_INSTALL'],
+                                 'etc/userdata-template')
+
+        try:
+            fd = open(USERDATA_FILENAME, 'r')
+        except Exception as e:
+                # NOTE(review): exits silently when the userdata template is
+                # missing -- consider logging the error before exiting.
+                sys.exit(-1)
+        else:
+            LP_USERDATA_FILE = fd.read()
+            # Run the enable lab script when the openstack vm comes up
+            LP_USERDATA_FILE += "runcmd:\n"
+            LP_USERDATA_FILE += " - echo Sleeping for 5 seconds and attempting to start elastic-network-interface\n"
+            LP_USERDATA_FILE += " - sleep 5\n"
+            LP_USERDATA_FILE += " - /bin/systemctl restart elastic-network-interfaces.service\n"
+
+        if salt_master is None:
+            salt_master=self._mc_private_ip
+        node_id = str(uuid.uuid4())
+
+        vdu = RwcalYang.VDUInitParams()
+        vdu.name = LAUNCHPAD_NAME
+        vdu.image_id = RIFT_IMAGE_AMI
+        vdu.flavor_id = 'c3.xlarge'
+        vdu.allocate_public_address = True
+        vdu.vdu_init.userdata = LP_USERDATA_FILE.format(master_ip = salt_master,
+                                          lxcname = node_id)
+        vdu.node_id = node_id
+
+        rc,rs=self._cal.create_vdu(self._acct,vdu)
+        assert rc == RwStatus.SUCCESS
+        self._lp_id = rs
+
+        driver = self._cal._get_driver(self._acct)
+        inst=driver.get_instance(self._lp_id)
+        inst.wait_until_running()
+
+        rc,rs =self._cal.get_vdu(self._acct,self._lp_id)
+        assert rc == RwStatus.SUCCESS
+
+        self._lp_public_ip = rs.public_ip
+        self._lp_private_ip = rs.management_ip
+        logger.info("Started Launchpad VM with id %s and IP Address %s\n",self._lp_id, self._lp_public_ip)
+         
+    def upload_ssh_key_to_ec2(self):
+        """
+         Upload SSH key to EC2 region
+
+         Uploads ~/.ssh/id_rsa.pub under the keypair name "<login>-sshkey";
+         logs an error (does not raise) when the file is absent.
+        """
+        driver = self._cal._get_driver(self._acct)
+        key_name = os.getlogin() + '-' + 'sshkey' 
+        key_path = '%s/.ssh/id_rsa.pub' % (os.environ['HOME'])
+        if os.path.isfile(key_path):
+            logger.info("Uploading ssh public key file in path %s with keypair name %s", key_path,key_name)
+            with open(key_path) as fp:
+                driver.upload_ssh_key(key_name,fp.read())
+        else:
+            logger.error("Valid Public key file %s not found", key_path)
+
+
+def main():
+    """
+    Main routine
+
+    Parses CLI options, instantiates AWSResources, then performs the
+    requested actions in order: upload-ssh-key, cleanup, mission-control,
+    launchpad.
+    """
+    parser = argparse.ArgumentParser(description='Script to manage AWS resources')
+
+    parser.add_argument('--aws-key',
+                        action = 'store',
+                        dest = 'aws_key',
+                        type = str,
+                        help='AWS key')
+
+    parser.add_argument('--aws-secret',
+                        action = 'store',
+                        dest = 'aws_secret',
+                        type = str,
+                        help='AWS secret')
+
+    parser.add_argument('--aws-region',
+                        action = 'store',
+                        dest = 'aws_region',
+                        type = str,
+                        help='AWS region')
+
+    parser.add_argument('--aws-az',
+                        action = 'store',
+                        dest = 'aws_az',
+                        type = str,
+                        help='AWS Availability zone')
+
+    parser.add_argument('--aws-sshkey',
+                        action = 'store',
+                        dest = 'aws_sshkey',
+                        type = str,
+                        help='AWS SSH Key to login to instance')
+
+    parser.add_argument('--aws-vpcid',
+                        action = 'store',
+                        dest = 'aws_vpcid',
+                        type = str,
+                        help='AWS VPC ID to use to indicate non default VPC')
+
+    parser.add_argument('--aws-default-subnet',
+                        action = 'store',
+                        dest = 'aws_default_subnet',
+                        type = str,
+                        help='AWS Default subnet id in VPC to be used for mgmt network')
+
+    parser.add_argument('--mission-control',
+                        action = 'store_true',
+                        dest = 'mission_control',
+                        help='Create Mission Control VM')
+
+    parser.add_argument('--launchpad',
+                        action = 'store_true',
+                        dest = 'launchpad',
+                        help='Create LaunchPad VM')
+
+    parser.add_argument('--salt-master',
+                        action = 'store',
+                        dest = 'salt_master',
+                        type = str,
+                        help='IP Address of salt controller. Required, if only launchpad  VM is being created.')
+
+    parser.add_argument('--cleanup',
+                        action = 'store',
+                        dest = 'cleanup',
+                        nargs = '+',
+                        type = str,
+                        help = 'Perform resource cleanup for AWS installation. \n Possible options are {all, mc, lp,  vms, networks }')
+
+    parser.add_argument('--upload-ssh-key',
+                         action = 'store_true',
+                         dest = 'upload_ssh_key',
+                         help = 'Upload users SSH public key ~/.ssh/id_rsa.pub')  
+
+    argument = parser.parse_args()
+
+    # NOTE(review): the message below lists SSH key as mandatory but
+    # aws_sshkey is never actually validated -- confirm intent.
+    if (argument.aws_key is None or argument.aws_secret is None or argument.aws_region is None or
+       argument.aws_az is None):
+        logger.error("Missing mandatory params. AWS Key, Secret, Region, AZ and SSH key are mandatory params")
+        sys.exit(-1)
+
+    # At least one action must be requested.
+    if (argument.cleanup is None and argument.mission_control is None and argument.launchpad is None 
+        and argument.upload_ssh_key is None):
+        logger.error('Insufficient parameters')
+        sys.exit(-1)
+
+    ### Start processing
+    logger.info("Instantiating cloud-abstraction-layer")
+    drv = AWSResources(key=argument.aws_key, secret=argument.aws_secret, region=argument.aws_region, availability_zone = argument.aws_az, 
+                       ssh_key = argument.aws_sshkey, vpcid = argument.aws_vpcid, default_subnet_id = argument.aws_default_subnet)
+    logger.info("Instantiating cloud-abstraction-layer.......[Done]")
+
+    if argument.upload_ssh_key:
+         drv.upload_ssh_key_to_ec2()
+
+    # 'all' short-circuits the remaining cleanup options.
+    if argument.cleanup is not None:
+        for r_type in argument.cleanup:
+            if r_type == 'all':
+                drv.destroy_resource()
+                break
+            if r_type == 'vms':
+                drv._destroy_vms()
+            if r_type == 'networks':
+                drv._destroy_networks()
+            if r_type == 'mc':
+                drv._destroy_mission_control()
+            if r_type == 'lp':
+                drv._destroy_launchpad()
+
+    if argument.mission_control == True:
+        drv.create_mission_control()
+
+    if argument.launchpad == True:
+        # A salt master address is required unless MC was just created,
+        # in which case its private IP is used as the default.
+        if argument.salt_master is None and argument.mission_control is False:
+            logger.error('Salt Master IP address not provided to start Launchpad.')
+            sys.exit(-2)
+
+        drv.create_launchpad_vm(argument.salt_master)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwcal/test/cal_module_test/CMakeLists.txt b/rwcal/test/cal_module_test/CMakeLists.txt
new file mode 100644
index 0000000..f637c28
--- /dev/null
+++ b/rwcal/test/cal_module_test/CMakeLists.txt
@@ -0,0 +1,41 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 21/01/2016
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+# Test driver script (installed executable).
+install(
+  PROGRAMS
+    cal_module_test
+  DESTINATION usr/rift/systemtest/cal_module_test
+  COMPONENT ${PKG_LONG_NAME})
+
+# py.test sources for the module test.
+install(
+  FILES
+    pytest/conftest.py
+    pytest/cal_module_test.py
+  DESTINATION usr/rift/systemtest/cal_module_test/pytest
+  COMPONENT ${PKG_LONG_NAME})
+
+# RA configuration for the test harness.
+install(
+  FILES
+    racfg/cal_module_test.racfg
+  DESTINATION
+    usr/rift/systemtest/cal_module_test
+    COMPONENT ${PKG_LONG_NAME})
+
diff --git a/rwcal/test/cal_module_test/cal_module_test b/rwcal/test/cal_module_test/cal_module_test
new file mode 100755
index 0000000..d7f21b6
--- /dev/null
+++ b/rwcal/test/cal_module_test/cal_module_test
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Driver for the CAL module system test: builds a py.test command line
+# using the shared mano helpers and runs it from the pytest directory.
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+SYS_TEST=$RIFT_INSTALL/usr/rift/systemtest/
+PYTEST_DIR=$SYS_TEST/cal_module_test/pytest
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider ${PYTEST_DIR}/cal_module_test.py"
+test_prefix="cal_module_test"
+TEST_NAME="TC_CAL_MODULE_TEST"
+RESULT_XML="cal_module_test.xml"
+
+# parse_args / append_args are provided by mano_common.sh; they populate
+# cloud_host, user and tenant[] and append "--opt value" pairs to test_cmd.
+parse_args "${@}"
+test_cmd="${SCRIPT_TEST}"
+append_args test_cmd os-host "\"${cloud_host}\""
+append_args test_cmd os-user "\"${user}\""
+append_args test_cmd os-tenant ${tenant[0]}
+append_args test_cmd junitprefix "\"${TEST_NAME}\""
+append_args test_cmd junitxml "\"${RIFT_MODULE_TEST}/${RESULT_XML}\""
+
+cd "${PYTEST_DIR}"
+eval ${test_cmd}
+
diff --git a/rwcal/test/cal_module_test/pytest/cal_module_test.py b/rwcal/test/cal_module_test/pytest/cal_module_test.py
new file mode 100644
index 0000000..ca3568f
--- /dev/null
+++ b/rwcal/test/cal_module_test/pytest/cal_module_test.py
@@ -0,0 +1,669 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file cal_module_test.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 22-Jan-2016
+
+"""
+
+import abc
+import logging
+import os
+import multiprocessing
+import signal
+import time
+import uuid
+import hashlib
+
+import pytest
+
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+# import rift.cal.server as cal_server
+import rw_peas
+import rwlogger
+        
+
+logger = logging.getLogger('rwcal')
+logging.basicConfig(level=logging.INFO)
+
+
+class CloudConfig(object):
+    """Abstract base bundling a CAL plugin handle with its cloud account.
+
+    Subclasses provide backend-specific account/flavor/vdu/image/vlink
+    configuration objects and, optionally, server lifecycle hooks.
+    """
+    def __init__(self, cal, account):
+        self.cal = cal
+        self.account = account
+
+    def check_state(self, object_id, object_api, expected_state, state_attr_name="state"):
+        """For a given object (Vm, port etc) checks if the object has
+        reached the expected state.
+
+        Args:
+            object_id: id to pass to the getter API.
+            object_api (str): name of the getter method on self.cal
+                (e.g. "get_vdu").
+            expected_state: value the state attribute must reach.
+            state_attr_name (str): attribute polled on the returned object.
+
+        Polls up to 100 times with 2s sleeps (~200s budget), then asserts
+        both the API status and the final state.
+        """
+        get_object = getattr(self.cal, object_api)
+        for i in range(100):  # 100 poll iterations...
+            rc, rs = get_object(self.account, object_id)
+
+            curr_state = getattr(rs, state_attr_name)
+            if curr_state == expected_state:
+                break
+            else:
+                time.sleep(2)
+
+        rc, rs = get_object(self.account, object_id)
+        assert rc == RwStatus.SUCCESS
+        assert getattr(rs, state_attr_name) == expected_state
+
+    def start_server(self):
+        # Default no-op; overridden by backends that fork a local server.
+        pass
+
+    def stop_server(self):
+        # Default no-op; overridden by backends that fork a local server.
+        pass
+
+    @abc.abstractmethod
+    def _cal(self):
+        pass
+
+    @abc.abstractmethod
+    def _account(self, option):
+        pass
+
+    @abc.abstractmethod
+    def flavor(self):
+        pass
+
+    @abc.abstractmethod
+    def vdu(self):
+        pass
+
+    @abc.abstractmethod
+    def image(self):
+        pass
+
+    @abc.abstractmethod
+    def virtual_link(self):
+        pass
+
+
+class Aws(CloudConfig):
+    """CloudConfig backed by the rwcal_aws plugin."""
+    def __init__(self, option):
+        """
+        Args:
+            option (OptionParser): OptionParser instance.
+        """
+        self.image_id = 'ami-7070231a'  # pre-baked RIFT AMI; see image()
+        self.virtual_link_id = None
+        self.flavor_id = None
+        self.vdu_id = None
+
+        super().__init__(self._cal(), self._account(option))
+
+    def _cal(self):
+        """
+        Loads rw.cal plugin via libpeas
+
+        NOTE(review): on init failure the bare except only logs, and the
+        (possibly unusable) cal handle is still returned.
+        """
+        plugin = rw_peas.PeasPlugin('rwcal_aws', 'RwCal-1.0')
+
+        engine, info, extension = plugin()
+
+        # Get the RwLogger context
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+        cal = plugin.get_interface("Cloud")
+        try:
+            rc = cal.init(rwloggerctx)
+            assert rc == RwStatus.SUCCESS
+        except:
+            logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+        else:
+            logger.info("AWS Cal plugin successfully instantiated")
+        return cal
+
+    def _account(self, option):
+        """
+        Args:
+            option (OptionParser): OptionParser instance.
+
+        Return:
+            CloudAccount details
+        """
+        account = RwcalYang.CloudAccount.from_dict({
+                "account_type": "aws",
+                "aws": {
+                    "key": option.aws_user,
+                    "secret": option.aws_password,
+                    "region": option.aws_region,
+                    "availability_zone": option.aws_zone,
+                    "ssh_key": option.aws_ssh_key
+                }
+            })
+
+        return account
+
+    def flavor(self):
+        """
+        Returns:
+            FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem.from_dict({
+                    "name": str(uuid.uuid4()),
+                    "vm_flavor": {
+                        "memory_mb": 1024,
+                        "vcpu_count": 1,
+                        "storage_gb": 0
+                    }
+            })
+
+        return flavor
+
+    def vdu(self):
+        """Provide AWS specific VDU config.
+
+        Uses self.virtual_link_id for the connection point, so
+        test_virtual_link_create must run first.
+
+        Returns:
+            VDUInitParams
+        """
+        vdu = RwcalYang.VDUInitParams.from_dict({
+                "name": str(uuid.uuid4()),
+                "node_id": "123456789012345",
+                "image_id": self.image_id,
+                "flavor_id": "t2.micro"
+            })
+
+        c1 = vdu.connection_points.add()
+        c1.name = str(uuid.uuid4())
+        c1.virtual_link_id = self.virtual_link_id
+
+        return vdu
+
+    def image(self):
+        raise NotImplementedError("Image create APIs are not implemented for AWS")
+
+    def virtual_link(self):
+        """Provide Vlink config
+
+        Returns:
+            VirtualLinkReqParams
+        """
+        vlink = RwcalYang.VirtualLinkReqParams.from_dict({
+                    "name": str(uuid.uuid4()),
+                    "subnet": '172.31.64.0/20',
+            })
+
+        return vlink
+
+
+class Cloudsim(CloudConfig):
+    def __init__(self, option):
+        self.image_id = None
+        self.virtual_link_id = None
+        self.flavor_id = None
+        self.vdu_id = None
+
+        self.server_process = None
+
+
+        super().__init__(self._cal(), self._account(option))
+
+    def _md5(fname, blksize=1048576):
+        hash_md5 = hashlib.md5()
+        with open(fname, "rb") as f:
+            for chunk in iter(lambda: f.read(blksize), b""):
+                hash_md5.update(chunk)
+        return hash_md5.hexdigest()
+                                    
+    def start_server(self):
+        logger = logging.getLogger(__name__)
+        server = cal_server.CloudsimServerOperations(logger)
+        self.server_process = multiprocessing.Process(
+                target=server.start_server,
+                args=(True,))
+        self.server_process.start()
+
+        # Sleep till the backup store is set up
+        time.sleep(30)
+
+    def stop_server(self):
+        self.server_process.terminate()
+
+        # If the process is not killed within the timeout, send a SIGKILL.
+        time.sleep(15)
+        if self.server_process.is_alive():
+            os.kill(self.server_process.pid, signal.SIGKILL)
+
+    def _cal(self):
+        """
+        Loads rw.cal plugin via libpeas
+        """
+        plugin = rw_peas.PeasPlugin('rwcal_cloudsimproxy', 'RwCal-1.0')
+        engine, info, extension = plugin()
+
+        # Get the RwLogger context
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+        cal = plugin.get_interface("Cloud")
+        try:
+            rc = cal.init(rwloggerctx)
+            assert rc == RwStatus.SUCCESS
+        except:
+            logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+        else:
+            logger.info("Cloudsim Cal plugin successfully instantiated")
+        return cal
+
+    def _account(self, option):
+        """
+        Args:
+            option (OptionParser): OptionParser instance.
+
+        Return:
+            CloudAccount details
+        """
+        account = RwcalYang.CloudAccount.from_dict({
+                'name': "cloudsim",
+                'account_type':'cloudsim_proxy'})
+
+        return account
+
+    def image(self):
+        """Provides Image config for openstack.
+
+        Returns:
+            ImageInfoItem
+        """
+        image = RwcalYang.ImageInfoItem.from_dict({
+                "name": str(uuid.uuid4()),
+                "location": os.path.join(os.getenv("RIFT_ROOT"), "images/rift-root-latest.qcow2"),
+                "disk_format": "qcow2",
+                "container_format": "bare",
+                "checksum": self._md5(os.path.join(os.getenv("RIFT_ROOT"), "images/rift-root-latest.qcow2")),
+            })
+        return image
+
+    def flavor(self):
+        """Flavor config for openstack
+
+        Returns:
+            FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem.from_dict({
+                "name": str(uuid.uuid4()),
+                "vm_flavor": {
+                        "memory_mb": 16392,
+                        "vcpu_count": 4,
+                        "storage_gb": 40
+                }})
+
+        return flavor
+
+    def vdu(self):
+        """Returns VDU config
+
+        Returns:
+            VDUInitParams
+        """
+        vdu = RwcalYang.VDUInitParams.from_dict({
+                "name": str(uuid.uuid4()),
+                "node_id": "123456789012345",
+                "image_id": self.image_id,
+                "flavor_id": self.flavor_id,
+            })
+
+        c1 = vdu.connection_points.add()
+        c1.name = str(uuid.uuid4())
+        c1.virtual_link_id = self.virtual_link_id
+
+        return vdu
+
+    def virtual_link(self):
+        """vlink config for Openstack
+
+        Returns:
+            VirtualLinkReqParams
+        """
+        vlink = RwcalYang.VirtualLinkReqParams.from_dict({
+                    "name": str(uuid.uuid4()),
+                    "subnet": '192.168.1.0/24',
+            })
+
+        return vlink
+
+
+class Openstack(CloudConfig):
+    """CloudConfig backed by the rwcal_openstack plugin."""
+    def __init__(self, option):
+        """
+        Args:
+            option (OptionParser)
+        """
+        self.image_id = None
+        self.virtual_link_id = None
+        self.flavor_id = None
+        self.vdu_id = None
+
+        super().__init__(self._cal(), self._account(option))
+
+    def _cal(self):
+        """
+        Loads rw.cal plugin via libpeas
+
+        NOTE(review): on init failure the bare except only logs, and the
+        (possibly unusable) cal handle is still returned.
+        """
+        plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+        engine, info, extension = plugin()
+
+        # Get the RwLogger context
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+        cal = plugin.get_interface("Cloud")
+        try:
+            rc = cal.init(rwloggerctx)
+            assert rc == RwStatus.SUCCESS
+        except:
+            logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+        else:
+            logger.info("Openstack Cal plugin successfully instantiated")
+        return cal
+
+    def _account(self, option):
+        """Cloud account information for Account
+
+        Builds the keystone v3 auth URL from the --os-host option.
+
+        Returns:
+            CloudAccount
+        """
+        acct = RwcalYang.CloudAccount.from_dict({
+            "account_type": "openstack",
+            "openstack": {
+                    "key": option.os_user,
+                    "secret": option.os_password,
+                    "auth_url": 'http://{}:5000/v3/'.format(option.os_host),
+                    "tenant": option.os_tenant,
+                    "mgmt_network": option.os_network
+                }
+            })
+
+        return acct
+    
+    def _md5(self, fname, blksize=1048576):
+        # Hex md5 digest of file fname, read in blksize chunks.
+        hash_md5 = hashlib.md5()
+        with open(fname, "rb") as f:
+            for chunk in iter(lambda: f.read(blksize), b""):
+                hash_md5.update(chunk)
+        return hash_md5.hexdigest()
+
+    def image(self):
+        """Provides Image config for openstack.
+
+        Returns:
+            ImageInfoItem
+        """
+        image = RwcalYang.ImageInfoItem.from_dict({
+                "name": str(uuid.uuid4()),
+                "location": os.path.join(os.getenv("RIFT_ROOT"), "images/rift-root-latest.qcow2"),
+                "disk_format": "qcow2",
+                "container_format": "bare",
+                "checksum": self._md5(os.path.join(os.getenv("RIFT_ROOT"), "images/rift-root-latest.qcow2")),
+            })
+        return image
+
+    def flavor(self):
+        """Flavor config for openstack
+
+        Includes EPA attributes (pinning, NUMA topology, PCIe passthrough)
+        in addition to the basic vm_flavor.
+
+        Returns:
+            FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem.from_dict({
+                "name": str(uuid.uuid4()),
+                "vm_flavor": {
+                        "memory_mb": 16392,
+                        "vcpu_count": 4,
+                        "storage_gb": 40
+                },
+                "guest_epa": {
+                        "cpu_pinning_policy": "DEDICATED",
+                        "cpu_thread_pinning_policy": "SEPARATE",
+                }})
+
+        # Two NUMA nodes, two vcpus and 8196 MB each.
+        numa_node_count = 2
+        flavor.guest_epa.numa_node_policy.node_cnt = numa_node_count
+        for i in range(numa_node_count):
+            node = flavor.guest_epa.numa_node_policy.node.add()
+            node.id = i
+            if i == 0:
+                node.vcpu = [0, 1]
+            elif i == 1:
+                node.vcpu = [2, 3]
+            node.memory_mb = 8196
+
+        dev = flavor.guest_epa.pcie_device.add()
+        dev.device_id = "PCI_10G_ALIAS"
+        dev.count = 1
+
+        return flavor
+
+    def vdu(self):
+        """Returns VDU config
+
+        Uses self.image_id / self.flavor_id / self.virtual_link_id set by
+        the earlier create tests.
+
+        Returns:
+            VDUInitParams
+        """
+        vdu = RwcalYang.VDUInitParams.from_dict({
+                "name": str(uuid.uuid4()),
+                "node_id": "123456789012345",
+                "image_id": self.image_id,
+                "flavor_id": self.flavor_id,
+            })
+
+        c1 = vdu.connection_points.add()
+        c1.name = str(uuid.uuid4())
+        c1.virtual_link_id = self.virtual_link_id
+
+        return vdu
+
+    def virtual_link(self):
+        """vlink config for Openstack
+
+        Returns:
+            VirtualLinkReqParams
+        """
+        vlink = RwcalYang.VirtualLinkReqParams.from_dict({
+                    "name": str(uuid.uuid4()),
+                    "subnet": '192.168.1.0/24',
+            })
+
+        return vlink
+
+
+# Module-scoped fixture parametrized over backends; currently only
+# Openstack is enabled (Aws/Cloudsim can be added to params).
+@pytest.fixture(scope="module", params=[Openstack], ids=lambda val: val.__name__)
+def cloud_config(request):
+    """Instantiate the selected CloudConfig subclass from CLI options."""
+    return request.param(request.config.option)
+
+
+@pytest.mark.incremental
+class TestCalSetup:
+
+    def test_start_server(self, cloud_config):
+        cloud_config.start_server()
+
+    def test_flavor_apis(self, cloud_config):
+        """
+        Asserts:
+            1. If the new flavor is created and available via read APIs
+            2. Verifies the READ APIs
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status, new_flavor_id = cal.create_flavor(account, cloud_config.flavor())
+        cloud_config.flavor_id = new_flavor_id
+        assert status == RwStatus.SUCCESS
+
+        status, flavors = cal.get_flavor_list(account)
+        assert status == RwStatus.SUCCESS
+
+        ids = []
+        for flavor in flavors.flavorinfo_list:
+            status, flavor_single = cal.get_flavor(account, flavor.id)
+            assert status == RwStatus.SUCCESS
+            assert flavor.id == flavor_single.id
+            ids.append(flavor.id)
+
+        assert new_flavor_id in ids
+
+    def test_image_apis(self, cloud_config):
+        """
+        Asserts:
+            1. If the new image is created and available via read APIs
+            2. Verifies the READ APIs
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        if type(cloud_config) is Aws:
+            # Hack!
+            new_image_id = "ami-7070231a"
+        else:
+            status, new_image_id = cal.create_image(account, cloud_config.image())
+            cloud_config.image_id = new_image_id
+            assert status == RwStatus.SUCCESS
+            cloud_config.check_state(new_image_id, "get_image", "active")
+
+
+        status, images = cal.get_image_list(account)
+
+        ids = []
+        for image in images.imageinfo_list:
+            status, image_single = cal.get_image(account, image.id)
+            assert status == RwStatus.SUCCESS
+            assert image_single.id == image.id
+            ids.append(image.id)
+
+        assert new_image_id in ids
+
+    def test_virtual_link_create(self, cloud_config):
+        """
+        Asserts:
+            1. If the new Vlink is created and available via read APIs
+            2. Verifies the READ APIs
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status, new_vlink_id = cal.create_virtual_link(account, cloud_config.virtual_link())
+        cloud_config.virtual_link_id = new_vlink_id
+        assert status.status == RwStatus.SUCCESS
+        cloud_config.check_state(new_vlink_id, "get_virtual_link", "active")
+
+        status, vlinks = cal.get_virtual_link_list(account)
+        assert status == RwStatus.SUCCESS
+
+        ids = []
+        for vlink in vlinks.virtual_link_info_list:
+            status, vlink_single = cal.get_virtual_link(account, vlink.virtual_link_id)
+            assert status == RwStatus.SUCCESS
+            assert vlink_single.virtual_link_id == vlink.virtual_link_id
+            ids.append(vlink.virtual_link_id)
+
+        assert new_vlink_id in ids
+
+    def test_vdu_apis(self, cloud_config):
+        """
+        Asserts:
+            1. If the new VDU is created and available via read APIs
+            2. Verifies the READ APIs
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status, new_vdu_id = cal.create_vdu(account, cloud_config.vdu())
+        cloud_config.vdu_id = new_vdu_id
+        assert status.status == RwStatus.SUCCESS
+        cloud_config.check_state(new_vdu_id, "get_vdu", "active")
+
+        status, vdus = cal.get_vdu_list(account)
+        assert status == RwStatus.SUCCESS
+
+        ids = []
+        for vdu in vdus.vdu_info_list:
+            status, vdu_single = cal.get_vdu(account, vdu.vdu_id)
+            assert status == RwStatus.SUCCESS
+            assert vdu_single.vdu_id == vdu.vdu_id
+            ids.append(vdu.vdu_id)
+
+        assert new_vdu_id in ids
+
+    def test_modify_vdu_api(self, cloud_config):
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        vdu_modify = RwcalYang.VDUModifyParams()
+        vdu_modify.vdu_id = cloud_config.vdu_id
+        c1 = vdu_modify.connection_points_add.add()
+        c1.name = "c_modify1"
+        # Set the new vlink
+        c1.virtual_link_id = cloud_config.virtual_link_id
+
+        status = cal.modify_vdu(account, vdu_modify)
+        assert status == RwStatus.SUCCESS
+
+@pytest.mark.incremental
+class TestCalTeardown:
+    def test_flavor_delete(self, cloud_config):
+        """
+        Asserts:
+            1. If flavor is deleted
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        if type(cloud_config) != Aws:
+            status = cal.delete_flavor(account, cloud_config.flavor_id)
+            assert status == RwStatus.SUCCESS
+
+    def test_image_delete(self, cloud_config):
+        """
+        Asserts:
+            1. If image is deleted
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        if type(cloud_config) != Aws:
+            status = cal.delete_image(account, cloud_config.image_id)
+            assert status == RwStatus.SUCCESS
+
+    def test_virtual_link_delete(self, cloud_config):
+        """
+        Asserts:
+            1. If VLink is deleted
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status = cal.delete_virtual_link(account, cloud_config.virtual_link_id)
+        assert status == RwStatus.SUCCESS
+
+    def test_delete_vdu(self, cloud_config):
+        """
+        Asserts:
+            1. If VDU is deleted
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status = cal.delete_vdu(account, cloud_config.vdu_id)
+        assert status == RwStatus.SUCCESS
+
+    def test_stop_server(self, cloud_config):
+        cloud_config.stop_server()
diff --git a/rwcal/test/cal_module_test/pytest/conftest.py b/rwcal/test/cal_module_test/pytest/conftest.py
new file mode 100644
index 0000000..c4b6705
--- /dev/null
+++ b/rwcal/test/cal_module_test/pytest/conftest.py
@@ -0,0 +1,37 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file conftest.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 21/01/2016
+
+"""
+
+def pytest_addoption(parser):
+    # Openstack related options
+    parser.addoption("--os-host", action="store", default="10.66.4.102")
+    parser.addoption("--os-user", action="store", default="pluto")
+    parser.addoption("--os-password", action="store", default="mypasswd")
+    parser.addoption("--os-tenant", action="store", default="demo")
+    parser.addoption("--os-network", action="store", default="private")
+
+    # aws related options
+    parser.addoption("--aws-user", action="store", default="AKIAIKRDX7BDLFU37PDA")
+    parser.addoption("--aws-password", action="store", default="cjCRtJxVylVkbYvOUQeyvCuOWAHieU6gqcQw29Hw")
+    parser.addoption("--aws-region", action="store", default="us-east-1")
+    parser.addoption("--aws-zone", action="store", default="us-east-1c")
+    parser.addoption("--aws-ssh-key", action="store", default="vprasad-sshkey")
diff --git a/rwcal/test/cal_module_test/racfg/cal_module_test.racfg b/rwcal/test/cal_module_test/racfg/cal_module_test.racfg
new file mode 100644
index 0000000..cd6d57a
--- /dev/null
+++ b/rwcal/test/cal_module_test/racfg/cal_module_test.racfg
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_CAL_MODULE_TESTS",
+  "commandline":"./cal_module_test --cloud-type 'openstack' --cloud-host={cloud_host} --user={user} {tenants}",
+  "target_vm":"VM",
+  "test_description":"System test targeting module tests for CAL accounts",
+  "run_as_root": true,
+  "status":"working",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2400,
+  "networks":[],
+  "vms":[
+    {
+      "name": "VM",
+      "memory": 8192,
+      "cpus": 4
+    }
+  ]
+}
+
diff --git a/rwcal/test/cloudtool_cal.py b/rwcal/test/cloudtool_cal.py
new file mode 100755
index 0000000..92f4891
--- /dev/null
+++ b/rwcal/test/cloudtool_cal.py
@@ -0,0 +1,989 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# 
+
+import os,sys,platform
+import socket
+import time
+import re
+import logging
+
+from pprint import pprint
+import argparse
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import rw_peas
+import rwlogger
+import time
+
+global nova
+nova = None
+
+def wait_till_active(driver, account, vm_id_list, timeout):                                                                                                              
+    """
+    Wait until VM reaches ACTIVE state. 
+    """
+    # Wait while VM goes to required state
+
+    start = time.time()
+    end = time.time() + timeout
+    done = False;
+
+    while ( time.time() < end ) and not done:
+       done = True      
+       for vm_id in vm_id_list:
+           rc, rs = driver.get_vm(account, vm_id)
+           assert rc == RwStatus.SUCCESS
+           if rs.state != 'ACTIVE':
+               done = False		   
+               time.sleep(2)
+
+
+def get_image_name(node):
+    images = driver.list_images()
+    for i in images:
+        if i.id == node.extra['imageId']:
+            return i.name
+    return None
+
+def get_flavor_name(flavorid):
+    global nova
+    if nova is None:
+        nova = ra_nova_connect(project='admin')
+    for f in nova.flavors.list(True):
+         if f.id == flavorid: 
+             return f.name
+    return None
+
+def hostname():
+    return socket.gethostname().split('.')[0]
+
+def vm_register(id, driver, account, cmdargs, header=True):
+    if testbed is None:
+        print("Cannot register VM without reservation system")
+        return False
+
+    if cmdargs.reserve_new_vms:
+        user=os.environ['USER']
+    else:
+        user=None
+    fmt="%-28s %-12s %-12s %-15s"
+    if header:
+        print('VM                           controller   compute      mgmt ip')
+        print('---------------------------- ------------ ------------ ---------------')
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS
+    for node in nodes.vminfo_list:
+        if id == 'all' or node.vm_id == id:
+            flavor = driver.get_flavor(account, node.flavor_id)
+            assert rc == RwStatus.SUCCESS
+            ip = node.management_ip
+            
+            huge = 'DISABLED'	    
+            if flavor.guest_epa.mempage_size == 'LARGE':
+                huge = flavor.guest_epa.mempage_size							    	    
+            #compute = utils.find_resource(nova.servers, node.id)
+            #compute_name = compute._info['OS-EXT-SRV-ATTR:hypervisor_hostname'].split('.')[0]
+            compute_name = hostname()	    
+            try:
+                testbed.add_resource(node.vm_name, hostname(), ip, flavor.vm_flavor.memory_mb, flavor.vm_flavor.vcpu_count, user, flavor.name, compute=compute_name, huge_pages=huge )
+                print(fmt % ( node.vm_name, hostname(), compute_name, ip )) 
+            except Exception as e:
+                print("WARNING: Error \"%s\" adding resource to reservation system" % e)
+
+class OFromDict(object):
+  def __init__(self, d):
+    self.__dict__ = d
+
+
+def vm_create_subcommand(driver, account, cmdargs):
+    """Process the VM create subcommand."""
+    if cmdargs.name and cmdargs.count != 1:
+        sys.exit("Error: when VM name is specified, the count must be 1")
+
+    rc, sizes = driver.get_flavor_list(account)
+    assert rc == RwStatus.SUCCESS
+
+    try:
+        size = [s for s in sizes.flavorinfo_list if s.name == cmdargs.flavor][0]
+    except IndexError:
+        sys.exit("Error: Failed to create VM, couldn't find flavor %s" % \
+                 cmdargs.flavor)
+    print(size)
+    rc, images = driver.get_image_list(account)
+    assert rc == RwStatus.SUCCESS
+    if images is None:
+    	sys.exit("Error: No images found")
+    try:
+        image = [i for i in images.imageinfo_list if cmdargs.image in i.name][0]
+    except IndexError:
+        sys.exit("Error: Failed to create VM, couldn't find image %s" % \
+                 cmdargs.image)
+    print(image)
+
+    # VM name is not specified, so determine a unique VM name
+    # VM name should have the following format:
+    #     rwopenstack_<host>_vm<id>, e.g., rwopenstack_grunt16_vm1
+    # The following code gets the list of existing VMs and determines
+    # a unique id for the VM name construction.
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS
+    prefix = 'rwopenstack_%s_vm' % hostname()
+    vmid = 0;
+    for n in nodes.vminfo_list:
+        if n.vm_name.startswith(prefix):
+            temp_str = n.vm_name[len(prefix):]
+            if temp_str == '':
+                temp = 1
+            else:
+                temp = int(n.vm_name[len(prefix):])
+
+            if (temp > vmid):
+                vmid = temp
+
+    nodelist = []
+    for i in range(0, cmdargs.count):
+            if cmdargs.name:
+                vm_name = cmdargs.name
+            else:
+                vm_name = '%s%d' % (prefix, vmid+i+1)
+ 
+            rc, netlist = driver.get_network_list(account)
+            assert rc == RwStatus.SUCCESS	
+            for network in netlist.networkinfo_list:
+                 print(network)    
+
+            vm = RwcalYang.VMInfoItem()
+            vm.vm_name = vm_name
+            vm.flavor_id = size.id
+            vm.image_id  = image.id
+            vm.cloud_init.userdata = ''
+
+            nets = dict()
+            for network in netlist.networkinfo_list:
+                if network.network_name != "public":
+                    nwitem = RwcalYang.VMInfoItem_NetworkList()			
+                    nwitem.network_id = network.network_id		    
+                    nets[network.network_name] = nwitem
+                     
+            logger.debug('creating VM using nets %s' % cmdargs.networks )
+            for net in cmdargs.networks.split(','):
+                if not net in nets:
+                    print(("Invalid network name '%s'" % net))
+                    print(('available nets are %s' % ','.join(list(nets.keys())) ))
+                    sys.exit(1)
+                if net != cmdargs.mgmt_network:
+                    vm.network_list.append(nets[net])
+
+            print(vm.network_list)
+            rc, node_id = driver.create_vm(account, vm) 
+
+            # wait for 1 to be up before starting the rest
+            # this is an attempt to make sure the image is cached
+            nodelist.append(node_id)
+            if i == 0 or cmdargs.wait_after_create is True:
+                #wait_until_running([node], timeout=300)
+                wait_till_active(driver, account, nodelist, timeout=300)		
+            print(node_id)
+    if cmdargs.reservation_server_url is not None:
+            if not cmdargs.wait_after_create:
+                print("Waiting for VMs to start")
+                wait_till_active(driver, account, nodelist, timeout=300)		
+                print("VMs are up")
+            header=True
+            for node in nodelist:
+                vm_register(node, driver, account, cmdargs, header)
+                header=False
+                
+
+def vm_destroy_subcommand(driver, account, cmdargs):
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS	
+    ct = len(nodes.vminfo_list)
+    if cmdargs.destroy_all or cmdargs.wait:
+        rc=0
+        for n in nodes.vminfo_list:
+            if testbed is not None:
+                try:
+                    testbed.remove_resource(n.vm_name)
+                except:
+                    print("WARNING: error deleting resource from reservation system")
+            if RwStatus.SUCCESS != driver.delete_vm(account, n.vm_id):
+                print('Error: failed to destroy node %s' % n.vm_name)
+                rc=1
+        if rc:
+            sys.exit(1)
+        if cmdargs.wait:
+            while ct > 0:
+                sys.stderr.write("waiting for %d VMs to exit...\n" % ct)
+                time.sleep(1)
+                try:
+                    rc, nodesnw = driver.get_vm_list(account)
+                    assert rc == RwStatus.SUCCESS	
+                    ct = len(nodesnw.vminfo_list )
+                except:
+                    pass
+        
+    else:
+        vm_re = re.compile('^%s$' % cmdargs.vm_name)
+        ct = 0
+        for n in nodes.vminfo_list:
+            if vm_re.match(n.vm_name):
+                ct += 1
+                if testbed is not None:
+                    try:
+                        testbed.remove_resource(n.vm_name)
+                    except:
+                        print("WARNING: error deleting resource from reservation system")
+                if RwStatus.SUCCESS != driver.delete_vm(account, n.vm_id):
+                    print('Error: failed to destroy node %s' % n.vm_name)
+                    return
+                print('destroyed %s' % n.vm_name)
+        if ct == 0:
+            print("No VMs matching \"%s\" found" % ( cmdargs.vm_name ))
+        
+                    
+def vm_rebuild_subcommand(driver, account, cmdargs):
+    images = driver.list_images()
+    found=0
+    for i in images:
+        if i.name == cmdargs.image_name:
+            found=1
+            break
+    if found != 1:
+        print('Error: Rebuild failed - image %s not found' % cmdargs.image_name)
+        sys.exit(1)
+    image=i
+    nodes = driver.list_nodes()
+    if cmdargs.rebuild_all:
+        rc=0
+        for n in nodes:
+            if not driver.ex_rebuild(n,image):
+                print('Error: failed to rebuild node %s' % n.name)
+                rc=1
+            if rc:
+               sys.exit(1)
+            rebuilt=0
+            while rebuilt != 1:
+                time.sleep(10)
+                nw_nodes = driver.list_nodes()
+                for nw in nw_nodes:
+                    if nw.name == n.name:
+                        if nw.state == n.state:
+                            rebuilt=1
+                        break  
+    else:
+        vm_re = re.compile('^%s$' % cmdargs.vm_name)
+        ct = 0
+        for n in nodes:
+            if vm_re.match(n.name):
+                ct += 1
+                if not driver.ex_rebuild(n,image):
+                    print('Error: failed to rebuild node %s' % n.name)
+                    return
+                print('Rebuilt %s' % n.name)
+                rebuilt=0
+                while rebuilt != 1:
+                    time.sleep(10)
+                    nw_nodes = driver.list_nodes()
+                    for nw in nw_nodes:
+                        if nw.name == n.name:
+                            if nw.state == n.state:
+                                rebuilt=1
+                            break  
+        if ct == 0:
+            print("No VMs matching \"%s\" found" % ( cmdargs.vm_name ))
+        
+                    
+
+def vm_reboot_subcommand(driver, account, cmdargs):
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS	
+    if cmdargs.reboot_all:
+        for n in nodes.vminfo_list:
+            '''
+            if not n.reboot():
+                print 'Error: failed to reboot node %s' % n.name
+            else:
+                print "rebooted %s" % n.name
+            '''
+            time.sleep(cmdargs.sleep_time)
+    else:
+        for n in nodes.vminfo_list:
+            if n.vm_name == cmdargs.vm_name:
+                if RwStatus.SUCCESS !=  driver.reboot_vm(account,n.vm_id):
+                    print('Error: failed to reboot node %s' % n.vm_name)
+                else:
+                    print("rebooted %s" % n.vm_name)
+                    
+
+def vm_start_subcommand(driver, account, cmdargs):
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS	
+    if cmdargs.start_all:
+        for n in nodes.vminfo_list:
+            print(dir(n))
+            if RwStatus.SUCCESS != driver.start_vm(account, n.vm_id):
+                print('Error: failed to start node %s' % n.vm_name)
+            else:
+                print("started %s" % n.vm_name)
+    else:
+        for n in nodes.vminfo_list:
+            if n.vm_name == cmdargs.vm_name:
+                if RwStatus.SUCCESS != driver.start_vm(account, n.vm_id):
+                    print('Error: failed to start node %s' % n.vm_name)
+                else:
+                    print("started %s" % n.vm_name)
+                    
+def vm_subcommand(driver, account, cmdargs):
+    """Process the vm subcommand"""
+
+    if cmdargs.which == 'list':
+        rc, nodes = driver.get_vm_list(account)
+        assert rc == RwStatus.SUCCESS	
+        for n in nodes.vminfo_list:
+            print(n)		
+            if n.state == 4:
+                if not cmdargs.ipsonly:
+                    print("%s is shutoff" % n.vm_name)
+            elif cmdargs.ipsonly:
+                i = n.management_ip
+                if i is not None:
+                    print(i)
+            else: 
+                if n.management_ip is not None:
+                    if len(n.private_ip_list) > 0:
+                        print("%s %s,%s" % (n.vm_name, n.management_ip, ",".join([i.get_ip_address() for i in n.private_ip_list])))
+                    else:
+                        print("%s %s" % (n.vm_name, n.management_ip))
+                else:
+                    print("%s NO IP" % n.vm_name)
+
+    elif cmdargs.which == 'create':
+        vm_create_subcommand(driver, account, cmdargs)
+
+    elif cmdargs.which == 'reboot':
+        vm_reboot_subcommand(driver, account, cmdargs)
+    elif cmdargs.which == 'start':
+        vm_start_subcommand(driver, account, cmdargs)
+    elif cmdargs.which == 'destroy':
+        vm_destroy_subcommand(driver, account, cmdargs)
+    #elif cmdargs.which == 'rebuild':
+    #    vm_rebuild_subcommand(driver, account, cmdargs)
+
+def image_delete_subcommand(driver, account, cmdargs):
+    rc,images = driver.get_image_list(account)
+    assert rc == RwStatus.SUCCESS
+    account.openstack.key          = 'admin'
+    if cmdargs.delete_all:
+        for i in images.imageinfo_list:
+            if RwStatus.SUCCESS != driver.delete_image(account, i.id):
+                print('Error: failed to delete image %s' % i.name)
+    else:
+        for i in images.imageinfo_list:
+            if i.name == cmdargs.image_name:
+                if RwStatus.SUCCESS != driver.delete_image(account, i.id):
+                    print('Error: failed to delete image %s' % i.name)
+
+def image_subcommand(driver, account, cmdargs):
+    """Process the image subcommand"""
+    if cmdargs.which == 'list':
+        rc, images = driver.get_image_list(account)
+        assert rc == RwStatus.SUCCESS
+
+        for i in images.imageinfo_list:
+            print(i)
+
+    elif cmdargs.which == 'delete':
+        image_delete_subcommand(driver, account, cmdargs)
+
+    elif cmdargs.which == 'create':
+        account.openstack.key          = 'admin'
+        rc, images = driver.get_image_list(account)
+        assert rc == RwStatus.SUCCESS
+        for i in images.imageinfo_list:
+            if i.name == cmdargs.image_name:
+                print("FATAL: image \"%s\" already exists" % cmdargs.image_name)
+                return 1
+        
+        print("creating image \"%s\" using %s ..." % \
+              (cmdargs.image_name, cmdargs.file_name))
+        img = RwcalYang.ImageInfoItem()
+        img.name = cmdargs.image_name
+        img.location = cmdargs.file_name
+        img.disk_format = "qcow2"
+        img.container_format = "bare"
+        rc, img_id = driver.create_image(account, img)	
+        print("... done. image_id is %s" % img_id)
+        return img_id
+
+    elif cmdargs.which == 'getid':
+        rc, images = driver.get_image_list(account)
+        assert rc == RwStatus.SUCCESS
+        found=0
+        for i in images.imageinfo_list:
+            if i.name == cmdargs.image_name:
+                print(i.id)
+                found += 1
+        if found != 1:
+            sys.exit(1)
+        
+def flavor_subcommand(driver, account, cmdargs):
+    """Process the flavor subcommand"""
+    if cmdargs.which == 'list':
+        rc, sizes = driver.get_flavor_list(account)
+        assert rc == RwStatus.SUCCESS
+        for f in sizes.flavorinfo_list:
+            rc, flv = driver.get_flavor(account, f.id)	    
+            print(flv)	    
+    elif cmdargs.which == 'create':
+        account.openstack.key          = 'admin'    
+        flavor                                     = RwcalYang.FlavorInfoItem()
+        flavor.name                                = cmdargs.flavor_name
+        flavor.vm_flavor.memory_mb                 = cmdargs.memory_size
+        flavor.vm_flavor.vcpu_count                = cmdargs.vcpu_count
+        flavor.vm_flavor.storage_gb                = cmdargs.disc_size
+        if cmdargs.hugepages_kilo:
+            flavor.guest_epa.mempage_size              = cmdargs.hugepages_kilo
+        if cmdargs.numa_nodes:
+            flavor.guest_epa.numa_node_policy.node_cnt = cmdargs.numa_nodes
+        if cmdargs.dedicated_cpu:
+            flavor.guest_epa.cpu_pinning_policy        = 'DEDICATED'
+        if cmdargs.pci_count:
+            dev = flavor.guest_epa.pcie_device.add()
+            dev.device_id = 'PCI_%dG_ALIAS' % (cmdargs.pci_speed)
+            dev.count = cmdargs.pci_count 
+        if cmdargs.colleto:
+            dev = flavor.guest_epa.pcie_device.add()
+            dev.device_id = 'COLETO_VF_ALIAS'
+            dev.count = cmdargs.colleto 
+        if cmdargs.trusted_host:
+            flavor.guest_epa.trusted_execution = True 
+
+        rc, flavor_id = driver.create_flavor(account, flavor)
+        assert rc == RwStatus.SUCCESS
+
+        print("created flavor %s id %s" % (cmdargs.flavor_name, flavor_id)) 
+
+    elif cmdargs.which == 'delete':
+        account.openstack.key          = 'admin'    
+        rc, sizes = driver.get_flavor_list(account)
+        assert rc == RwStatus.SUCCESS
+        for f in sizes.flavorinfo_list:
+            if f.name == cmdargs.flavor_name:
+                rc = driver.delete_flavor(account, f.id)
+                assert rc == RwStatus.SUCCESS
+
+def hostagg_subcommand(driver, account, cmdargs):
+    """Process the hostagg subcommand"""
+    if cmdargs.which == 'list':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            print("%-12s %-12s" % \
+                  (f.name, f.availability_zone))
+                
+    elif cmdargs.which == 'create':
+        nova = ra_nova_connect(project='admin')
+        hostagg = nova.aggregates.create(cmdargs.hostagg_name, 
+                                     cmdargs.avail_zone)
+        print("created hostagg %s in %s" % (hostagg.name, hostagg.availability_zone)) 
+
+    elif cmdargs.which == 'delete':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            if f.name == cmdargs.hostagg_name:
+                if cmdargs.force_delete_hosts:
+                    for h in f.hosts:
+                        f.remove_host(h)
+
+                f.delete()
+
+    elif cmdargs.which == 'addhost':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            if f.name == cmdargs.hostagg_name:
+                f.add_host(cmdargs.host_name)
+
+    elif cmdargs.which == 'delhost':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            if f.name == cmdargs.hostagg_name:
+                f.remove_host(cmdargs.host_name)
+
+    elif cmdargs.which == 'setmetadata':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            if f.name == cmdargs.hostagg_name:
+                d = dict([cmdargs.extra_specs.split("="),])		    
+                f.set_metadata(d)
+
+def quota_subcommand(driver, account, cmdargs):
+    """Process the quota subcommand"""
+    nova = ra_nova_connect(project='admin')
+    cfgfile = get_openstack_file(None,  cmdargs.project)
+    kwargs = load_params(cfgfile)
+
+    keystone = keystone_client.Client(username=kwargs.get('OS_USERNAME'),
+                               password=kwargs.get('OS_PASSWORD'),
+                               tenant_name=kwargs.get('OS_TENANT_NAME'),
+                               auth_url=kwargs.get('OS_AUTH_URL'))
+    if cmdargs.which == 'set':
+        nova.quotas.update(keystone.tenant_id, 
+                           ram=cmdargs.memory, 
+                           floating_ips=cmdargs.ips, 
+                           instances=cmdargs.vms, 
+                           cores=cmdargs.vcpus)
+    elif cmdargs.which == 'get':
+        print("get quotas for tenant %s %s" % \
+              (cmdargs.project, keystone.tenant_id))
+        q = nova.quotas.get(keystone.tenant_id)
+        for att in [ 'ram', 'floating_ips', 'instances', 'cores' ]: 
+            print("%12s: %6d" % ( att, getattr(q, att) ))
+        
+def rules_subcommand(driver, account, cmdargs):
+    nova = ra_nova_connect(project='demo')
+    group=nova.security_groups.find(name='default')
+    if cmdargs.which == 'set':
+        try:
+            nova.security_group_rules.create(group.id,ip_protocol='tcp', from_port=1, to_port=65535 )
+        except BadRequest:
+            pass
+        try: 
+            nova.security_group_rules.create(group.id, ip_protocol='icmp',from_port=-1, to_port=-1 )
+        except BadRequest:
+            pass
+            
+    elif cmdargs.which == 'list':
+        for r in group.rules:
+            if r['from_port'] == -1:
+                print("rule %d proto %s from IP %s" % ( r['id'], r['ip_protocol'], r['ip_range']['cidr'] ))
+            else:
+                print("rule %d proto %s from port %d to %d from IP %s" % ( r['id'], r['ip_protocol'], r['from_port'], r['to_port'], r['ip_range']['cidr'] ))
+
+
+def register_subcommand(driver, account, cmdargs):
+    cmdargs.reserve_new_vms = False
+    vm_register('all', driver, account, cmdargs)       
+           
+##
+# Command line argument specification
+##
+desc="""This tool is used to manage the VMs"""
+kilo=platform.dist()[1]=='21'
+parser = argparse.ArgumentParser(description=desc)
+subparsers = parser.add_subparsers()
+ipaddr = socket.gethostbyname(socket.getfqdn())
+reservation_server_url = os.environ.get('RESERVATION_SERVER', 'http://reservation.eng.riftio.com:80')
+# ipaddr = netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']
+#default_auth_url = 'http://%s:5000/v3/' % ipaddr
+default_auth_url = 'http://10.66.4.27:5000/v3/'
+
+parser.add_argument('-t', '--provider-type', dest='provider_type',
+                    type=str, default='OPENSTACK', 
+                    help='Cloud provider type (default: %(default)s)')
+parser.add_argument('-u', '--user-name', dest='user', 
+                    type=str, default='demo', 
+                    help='User name (default: %(default)s)')
+parser.add_argument('-p', '--password', dest='passwd', 
+                    type=str, default='mypasswd', 
+                    help='Password (default: %(default)s)')
+parser.add_argument('-m', '--mgmt-nw', dest='mgmt_network', 
+                    type=str, default='private', 
+                    help='mgmt-network (default: %(default)s)')
+parser.add_argument('-a', '--auth-url', dest='auth_url', 
+                    type=str, default=default_auth_url, 
+                    help='Keystone auth URL (default: %(default)s)')
+parser.add_argument('-r', '--reservation_server_url', dest='reservation_server_url', 
+                    type=str, default=reservation_server_url, 
+                    help='reservation server url, use None to disable (default %(default)s)' )
+parser.add_argument('-d', '--debug', dest='debug', action='store_true', help='raise the logging level')
+
+##
+# Subparser for VM
+##
+vm_parser = subparsers.add_parser('vm')
+vm_subparsers = vm_parser.add_subparsers()
+
+# Create VM subparser
+vm_create_parser = vm_subparsers.add_parser('create')
+vm_create_parser.add_argument('-c', '--count',
+                              type=int, default=1,
+                              help='The number of VMs to launch '
+                                   '(default: %(default)d)')
+vm_create_parser.add_argument('-i', '--image', 
+							  default='rwopenstack_vm',
+                              help='Specify the image for the VM  (default: %(default)s)')
+vm_create_parser.add_argument('-n', '--name',
+                              help='Specify the name of the VM')
+vm_create_parser.add_argument('-f', '--flavor',
+                              help='Specify the flavor for the VM')
+vm_create_parser.add_argument('-R', '--reserve', dest='reserve_new_vms', 
+                    action='store_true', help='reserve any newly created VMs')
+vm_create_parser.add_argument('-s', '--single', dest='wait_after_create', 
+                    action='store_true', help='wait for each VM to start before creating the next')
+vm_create_parser.add_argument('-N', '--networks', dest='networks', type=str, 
+                                default='private,private2,private3,private4',
+                                help='comma separated list of networks to connect these VMs to (default: %(default)s)' )
+
+vm_create_parser.set_defaults(which='create')
+# Reboot VM subparser
+vm_reboot_parser = vm_subparsers.add_parser('reboot')
+group = vm_reboot_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--vm-name', dest='vm_name',
+                   type=str,
+                   help='Specify the name of the VM')
+group.add_argument('-a', '--reboot-all', 
+                   dest='reboot_all', action='store_true',
+                   help='Reboot all VMs')
+vm_reboot_parser.add_argument('-s', '--sleep', dest='sleep_time', type=int, default=4, help='time in seconds to sleep between reboots')
+vm_reboot_parser.set_defaults(which='reboot')
+
+
+# NOTE(review): the triple-quoted 'start' subparser below is dead code --
+# either implement it or delete the block rather than keeping it as a string.
+"""
+# start VM subparser
+vm_start_parser = vm_subparsers.add_parser('start')
+group = vm_start_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--vm-name', dest='vm_name',
+                   type=str,
+                   help='Specify the name of the VM')
+group.add_argument('-a', '--start-all', 
+                   dest='start_all', action='store_true',
+                   help='Start all VMs')
+vm_start_parser.set_defaults(which='start')
+"""
+
+# Destroy VM subparser
+vm_destroy_parser = vm_subparsers.add_parser('destroy')
+group = vm_destroy_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--vm-name', dest='vm_name',
+                   type=str,
+                   help='Specify the name of the VM (accepts regular expressions)')
+group.add_argument('-a', '--destroy-all', 
+                   dest='destroy_all', action='store_true',
+                   help='Delete all VMs')
+# NOTE(review): -w is described as "destroy all and wait", yet it is made
+# mutually exclusive with -a/--destroy-all -- confirm the intended grouping.
+group.add_argument('-w', '--wait', 
+                   dest='wait', action='store_true',
+                   help='destroy all and wait until all VMs have exited')
+vm_destroy_parser.set_defaults(which='destroy')
+
+# Rebuild VM subparser
+vm_rebuild_parser = vm_subparsers.add_parser('rebuild')
+group = vm_rebuild_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--vm-name', dest='vm_name',
+                   type=str,
+                   help='Specify the name of the VM (accepts regular expressions)')
+group.add_argument('-a', '--rebuild-all', 
+                   dest='rebuild_all', action='store_true',
+                   help='Rebuild all VMs')
+vm_rebuild_parser.add_argument('-i', '--image-name', dest='image_name',
+                              type=str,
+                              help='Specify the name of the image')
+vm_rebuild_parser.set_defaults(which='rebuild')
+
+# List VM subparser
+vm_list_parser = vm_subparsers.add_parser('list')
+vm_list_parser.set_defaults(which='list')
+vm_list_parser.add_argument('-i', '--ips_only', dest='ipsonly', 
+                            action='store_true', 
+                            help='only list IP addresses')
+
+# All 'vm' subcommands dispatch through vm_subcommand (defined earlier in the file).
+vm_parser.set_defaults(func=vm_subcommand)
+
+##
+# Subparser for image
+##
+image_parser = subparsers.add_parser('image')
+image_subparsers = image_parser.add_subparsers()
+
+# List image subparser
+image_list_parser = image_subparsers.add_parser('list')
+image_list_parser.set_defaults(which='list')
+
+# Delete image subparser
+image_destroy_parser = image_subparsers.add_parser('delete')
+group = image_destroy_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--image-name', dest='image_name',
+                   type=str,
+                   help='Specify the name of the image')
+group.add_argument('-a', '--delete-all', 
+                   dest='delete_all', action='store_true',
+                   help='Delete all images')
+image_destroy_parser.set_defaults(which='delete')
+
+# create image
+image_create_parser = image_subparsers.add_parser('create')
+image_create_parser.set_defaults(which='create')
+image_create_parser.add_argument('-n', '--image-name', dest='image_name',
+                                  type=str,
+                                  default="rwopenstack_vm",
+                                  help='Specify the name of the image')
+image_create_parser.add_argument('-f', '--filename', dest='file_name',
+                                  type=str, 
+                                  default='/net/sharedfiles/home1/common/vm/rift-root-current.qcow2',
+                                  help='name of the existing qcow2 image file')
+
+
+# getid image subparser.
+# FIX: previously this reused the image_create_parser variable, silently
+# shadowing the 'create' parser binding above; give it its own name.
+image_getid_parser = image_subparsers.add_parser('getid')
+image_getid_parser.set_defaults(which='getid')
+image_getid_parser.add_argument('-n', '--image-name', dest='image_name',
+                                  type=str,
+                                  default="rwopenstack_vm",
+                                  help='Specify the name of the image')
+image_parser.set_defaults(func=image_subcommand)
+
+##
+# Subparser for flavor
+##
+flavor_parser = subparsers.add_parser('flavor')
+flavor_subparsers = flavor_parser.add_subparsers()
+
+# List flavor subparser
+flavor_list_parser = flavor_subparsers.add_parser('list')
+flavor_list_parser.set_defaults(which='list')
+
+# Create flavor subparser
+flavor_create_parser = flavor_subparsers.add_parser('create')
+flavor_create_parser.set_defaults(which='create')
+flavor_create_parser.add_argument('-n', '--flavor-name', dest='flavor_name',
+                                  type=str,
+                                  help='Specify the name of the flavor')
+flavor_create_parser.add_argument('-m', '--memory-size', dest='memory_size',
+                                  type=int, default=1024,
+                                  help='Specify the size of the memory in MB '
+                                       '(default: %(default)d)')
+flavor_create_parser.add_argument('-d', '--disc-size', dest='disc_size',
+                                  type=int, default=16,
+                                  help='Specify the size of the disc in GB '
+                                       '(default: %(default)d)')
+flavor_create_parser.add_argument('-v', '--vcpu-count', dest='vcpu_count',
+                                  type=int, default=1,
+                                  help='Specify the number of VCPUs '
+                                       '(default: %(default)d)')
+flavor_create_parser.add_argument('-p', '--pci-count', dest='pci_count',
+                                  type=int, default=0,
+                                  help='Specify the number of PCI devices '
+                                       '(default: %(default)d)')
+flavor_create_parser.add_argument('-s', '--pci-speed', dest='pci_speed',
+                                  type=int, default=10,
+                                  help='Specify the speed of the PCI devices in Gbps (default: %(default)d)')
+flavor_create_parser.add_argument('-e', '--hostagg-extra-specs', dest='extra_specs',
+                                  type=str, 
+                                  help='Specify the extra spec ')
+flavor_create_parser.add_argument('-b', '--back-with-hugepages', dest='enable_hugepages',
+                                  action='store_true',
+                                  help='Enable memory backing with hugepages')
+flavor_create_parser.add_argument('-B', '--back-with-hugepages-kilo', dest='hugepages_kilo',
+                                  type=str,
+                                  help='Enable memory backing with hugepages for kilo')
+flavor_create_parser.add_argument('-D', '--dedicated_cpu', dest='dedicated_cpu',
+                                  action='store_true',
+                                  help='Dedicated CPU usage')
+flavor_create_parser.add_argument('-T', '--cpu_threads', dest='cpu_threads',
+                                  type=str, 
+                                  help='CPU threads usage')
+flavor_create_parser.add_argument('-N', '--numa_nodes', dest='numa_nodes',
+                                  type=int, 
+                                  help='Configure numa nodes')
+flavor_create_parser.add_argument('-t', '--trusted-host', dest='trusted_host',  action='store_true', help='restrict instances to trusted hosts')
+# "colleto" refers to Coleto Creek crypto accelerator VFs (see help text).
+flavor_create_parser.add_argument('-c', '--crypto-cards', dest='colleto',  type=int, default=0,  \
+                                    help='how many colleto creek VFs should be passed thru to the VM')
+
+# Delete flavor subparser
+flavor_delete_parser = flavor_subparsers.add_parser('delete')
+flavor_delete_parser.set_defaults(which='delete')
+flavor_delete_parser.add_argument('-n', '--flavor-name', dest='flavor_name',
+                                  type=str,
+                                  help='Specify the name of the flavor')
+
+# All 'flavor' subcommands dispatch through flavor_subcommand.
+flavor_parser.set_defaults(func=flavor_subcommand)
+
+##
+# Subparser for host-aggregate 
+##
+hostagg_parser = subparsers.add_parser('hostagg')
+hostagg_subparsers = hostagg_parser.add_subparsers()
+
+# List host-aggregate subparser
+hostagg_list_parser = hostagg_subparsers.add_parser('list')
+hostagg_list_parser.set_defaults(which='list')
+
+# Create hostagg subparser
+hostagg_create_parser = hostagg_subparsers.add_parser('create')
+hostagg_create_parser.set_defaults(which='create')
+hostagg_create_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_create_parser.add_argument('-a', '--avail-zone', dest='avail_zone',
+                                  type=str,
+                                  help='Specify the name of the availability zone')
+# Delete hostagg subparser
+hostagg_delete_parser = hostagg_subparsers.add_parser('delete')
+hostagg_delete_parser.set_defaults(which='delete')
+hostagg_delete_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_delete_parser.add_argument('-f', '--force-delete-hosts', dest='force_delete_hosts',
+                                  action='store_true',
+                                  help='Delete the existing hosts')
+
+# Add host subparser
+hostagg_addhost_parser = hostagg_subparsers.add_parser('addhost')
+hostagg_addhost_parser.set_defaults(which='addhost')
+hostagg_addhost_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_addhost_parser.add_argument('-c', '--compute-host-name', dest='host_name',
+                                  type=str,
+                                  help='Specify the name of the host to be added')
+
+# Remove host subparser
+hostagg_delhost_parser = hostagg_subparsers.add_parser('delhost')
+hostagg_delhost_parser.set_defaults(which='delhost')
+hostagg_delhost_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_delhost_parser.add_argument('-c', '--compute-host-name', dest='host_name',
+                                  type=str,
+                                  help='Specify the name of the host to be removed')
+
+# Set meta-data subparser
+hostagg_setdata_parser = hostagg_subparsers.add_parser('setmetadata')
+hostagg_setdata_parser.set_defaults(which='setmetadata')
+hostagg_setdata_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_setdata_parser.add_argument('-d', '--meta-data', dest='extra_specs',
+                                  type=str,
+                                  help='Specify the meta-data to be associated to this host aggregate')
+
+# All 'hostagg' subcommands dispatch through hostagg_subcommand.
+hostagg_parser.set_defaults(func=hostagg_subcommand)
+
+##
+# Subparser for quota
+##
+quota_parser = subparsers.add_parser('quota')
+quota_subparser = quota_parser.add_subparsers()
+quota_set_parser = quota_subparser.add_parser('set')
+
+# quota set subparser
+# NOTE(review): the defaults below (48 vcpus, 24 VMs, 250 IPs, 120 GB RAM)
+# are site-specific tuning values -- confirm they match the target cloud.
+quota_set_parser.set_defaults(which='set')
+quota_set_parser.add_argument('-p', '--project', dest='project', 
+                              type=str, default='demo', 
+                              help='project name that you wish to set '
+                                   'the quotas for')
+quota_set_parser.add_argument('-c', '--vcpus', dest='vcpus', 
+                              type=int, default=48, 
+                              help='Maximum number of virtual CPUs that can '
+                                   'be assigned to all VMs in aggregate')
+quota_set_parser.add_argument('-v', '--vms', dest='vms', 
+                              type=int, default=24, 
+                              help='Maximum number of VMs that can be created ' 
+                                   'on this openstack instance '
+                                   '(which may be more than 1 machine)')
+quota_set_parser.add_argument('-i', '--ips', dest='ips', 
+                              type=int, default=250, 
+                              help='Maximum number of Floating IP Addresses '
+                                   'that can be assigned to all VMs '
+                                   'in aggregate')
+quota_set_parser.add_argument('-m', '--memory', dest='memory', 
+                              type=int, default=122880, 
+                              help='Maximum amount of RAM in MB that can be '
+                                   'assigned to all VMs in aggregate')
+
+# quota get subparser
+quota_get_parser = quota_subparser.add_parser('get')
+quota_get_parser.add_argument('-p', '--project', dest='project', 
+                              type=str, default='demo', 
+                              help='project name that you wish to get '
+                                   'the quotas for')
+quota_get_parser.set_defaults(which='get')
+quota_parser.set_defaults(func=quota_subcommand)
+
+##
+# rules subparser
+##
+rules_parser = subparsers.add_parser('rules')
+rules_parser.set_defaults(func=rules_subcommand)
+rules_subparser = rules_parser.add_subparsers()
+rules_set_parser = rules_subparser.add_parser('set')
+rules_set_parser.set_defaults(which='set')
+rules_list_parser = rules_subparser.add_parser('list')
+rules_list_parser.set_defaults(which='list')
+
+# register subparser (takes no additional options)
+register_parser = subparsers.add_parser('register')
+register_parser.set_defaults(func=register_subcommand)
+ 
+# NOTE(review): parse_args() executes at module scope, not under the
+# __main__ guard, so merely importing this module parses sys.argv --
+# confirm this is intended (it will break importers with unrelated argv).
+cmdargs = parser.parse_args()
+
+
+if __name__ == "__main__":
+    logger=logging.getLogger(__name__)
+    # -d/--debug raises the level to DEBUG; default is WARNING.
+    if cmdargs.debug:
+        logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s', level=logging.DEBUG) 
+    else:
+        logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s', level=logging.WARNING) 
+
+    if cmdargs.provider_type == 'OPENSTACK':
+        #cls = get_driver(Provider.OPENSTACK)
+        pass
+    elif cmdargs.provider_type == 'VSPHERE':
+        cls = get_driver(Provider.VSPHERE)
+    else:
+        sys.exit("Cloud provider %s is NOT supported yet" % cmdargs.provider_type)
+
+    # "-r None" or an empty string disables the reservation server.
+    if cmdargs.reservation_server_url == "None" or cmdargs.reservation_server_url == "":
+        cmdargs.reservation_server_url = None
+    if cmdargs.reservation_server_url is not None:
+        sys.path.append('/usr/rift/lib')
+        try:
+            import ndl
+        except Exception as e:
+            logger.warning("Error loading Reservation library")
+            testbed=None
+        else:
+            testbed=ndl.Testbed()
+            testbed.set_server(cmdargs.reservation_server_url)
+            
+
+
+    if cmdargs.provider_type == 'OPENSTACK':
+        # Build the CAL account object from the command-line credentials.
+        account                        = RwcalYang.CloudAccount()
+        account.account_type           = "openstack"
+        account.openstack.key          = cmdargs.user
+        account.openstack.secret       = cmdargs.passwd
+        account.openstack.auth_url     = cmdargs.auth_url
+        account.openstack.tenant       = cmdargs.user
+        account.openstack.mgmt_network = cmdargs.mgmt_network
+
+        plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+        engine, info, extension = plugin()
+        driver = plugin.get_interface("Cloud")
+        # Get the RwLogger context
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+        try:
+            rc = driver.init(rwloggerctx)
+            assert rc == RwStatus.SUCCESS
+        except Exception:
+            # FIX: the previous bare "except:" logged "Aborting tests" but
+            # then fell through and invoked cmdargs.func() on the
+            # uninitialized driver anyway; now we really abort.
+            logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+            sys.exit(1)
+        else:
+            logger.info("Openstack Cal plugin successfully instantiated")
+
+        cmdargs.func(driver, account, cmdargs)
+
+    elif cmdargs.provider_type == 'VSPHERE':
+        driver = cls(cmdargs.user, cmdargs.passwd, host='vcenter' )
+        cmdargs.func(driver, cmdargs)
diff --git a/rwcal/test/ec2.py b/rwcal/test/ec2.py
new file mode 100644
index 0000000..59ad049
--- /dev/null
+++ b/rwcal/test/ec2.py
@@ -0,0 +1,275 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import glob
+import itertools
+import os
+
+import boto
+import boto.vpc
+
+# TODO:  Pull the lastest of owned instances.
+# Default AMI used when the caller does not pass one to RWEC2().
+__default_instance_ami__ = 'ami-e421bc8c'
+
+# TODO:  Make VPC's per user?
+# Hard-coded RIFT.io EC2 resources -- environment specific; these must be
+# updated if the VPC/subnet/security group are recreated.
+__default_subnet__ = 'subnet-4b484363'
+__default_security_group__ = 'sg-d9da90bc'
+
+__default_instance_type__ = 'm1.medium'
+__default_vpc__ = 'vpc-e7ed4482'
+
+class RWEC2(object):
+    """Thin wrapper over boto EC2/VPC for provisioning RIFT cluster VMs.
+
+    NOTE(review): this module is Python 2 only -- see the 'print net_ifs'
+    statement in provision() and the 'except Exc, e' syntax in fastpath111().
+    """
+    def __init__(self,  subnet=None, ami=None):
+        # Fall back to the module-level defaults when not specified;
+        # boto.connect_ec2() takes credentials/region from the boto config
+        # or environment.
+        self._subnet = subnet if subnet is not None else __default_subnet__
+        self._ami = ami if ami is not None else __default_instance_ami__
+
+        self._conn = boto.connect_ec2()
+
+    @staticmethod
+    def cloud_init_current_user():
+        """
+        Return user_data configuration suitable for cloud-init that will create a user
+        with sudo and ssh key access on the remote instance.
+
+        ssh keys are found with the glob ~/.ssh/*pub*
+        """
+        # NOTE(review): os.getlogin() raises OSError when there is no
+        # controlling terminal (cron/daemons) -- confirm that is acceptable.
+        user_data = "users:\n"
+        user_data += " - name: %s\n" % (os.getlogin(),)
+        user_data += "   groups: [wheel, adm, systemd-journal]\n"
+        user_data += "   sudo: [\"ALL=(ALL) NOPASSWD:ALL\"]\n"
+        user_data += "   shell: /bin/bash\n"
+        user_data += "   ssh_authorized_keys:\n"
+        for pub_key in glob.glob('%s/.ssh/*pub*' % (os.environ['HOME'],)):
+            with open(pub_key) as fp:
+                user_data += "    -  %s" % (fp.read(),)
+
+        return user_data
+
+
+    @staticmethod
+    def cloud_init_yum_repos():
+        """
+        Return a string of user_data commands that can be used to update the yum
+        repos to point to the correct location.  They should be added by the caller
+        within a 'runcmd:' block.
+        """
+        # Rewrites the mirror hostname/port inside every .repo file on the VM.
+        ret = " - sed -i -e 's,www\.,,' -e 's,riftio\.com/mirrors,riftio.com:8881,' /etc/yum.repos.d/*.repo\n"
+        return ret
+
+    def instances(self, cluster_component, cluster_instance):
+        """
+        List of instances owned by the given cluster instance
+
+        @param cluster_component  - parent cluster of each instance
+        @param cluster_instance   - instance id of the owning cluster
+
+        @return                   - list of boto.ec2.instance.Instances provisioned
+        """
+        # Ownership is tracked via the 'parent_component'/'parent_instance'
+        # tags set by provision()/provision_master().
+        ret = []
+        reservations = self._conn.get_all_instances()
+        for instance in [instance for reservation in reservations for instance in reservation.instances]:
+            tags = instance.tags
+            if (tags.get('parent_component') == cluster_component
+                    and tags.get('parent_instance') == cluster_instance):
+                ret.append(instance)
+
+        return ret
+
+    def provision_master(self, cluster_component, cluster_instance):
+        """
+        Provision a master instance in EC2.  The master instance is a special instance with the
+        following features:
+            - Public IP
+            - /home shared over NFS
+
+        @param cluster_component  - parent cluster of each instance
+        @param cluster_instance   - instance id of the owning cluster
+
+        @return                   - boto.ec2.instance.Instances provisioned
+        """
+        # The subnet's CIDR block is used to scope the NFS export below.
+        vpc = boto.vpc.VPCConnection()
+        subnet = vpc.get_all_subnets(subnet_ids=__default_subnet__)[0]
+        cidr_block = subnet.cidr_block
+        vpc.close()
+
+        # cloud-init: export /home over NFS to the subnet, fix yum repos,
+        # and create the invoking user with ssh/sudo access.
+        user_data = "#cloud-config\n"
+        user_data += "runcmd:\n"
+        user_data += " - echo '/home %s(rw,root_squash,sync)' >  /etc/exports\n" % (cidr_block,)
+        user_data += " - systemctl start nfs-server\n"
+        user_data += " - systemctl enable nfs-server\n"
+        user_data += self.cloud_init_yum_repos()
+        user_data += self.cloud_init_current_user()
+
+
+        # Masters get a public IP; workers (see provision()) do not.
+        net_if = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+                subnet_id=__default_subnet__,
+                groups=[__default_security_group__,],
+                associate_public_ip_address=True)
+
+        net_ifs = boto.ec2.networkinterface.NetworkInterfaceCollection(net_if)
+
+        new_reservation = self._conn.run_instances(
+                image_id=self._ami,
+                min_count=1,
+                max_count=1,
+                instance_type=__default_instance_type__,
+                network_interfaces=net_ifs,
+                tenancy='default',
+                user_data=user_data)
+        instance = new_reservation.instances[0]
+
+        # Tag for later lookup via instances(); 'master'='self' marks this
+        # instance as its own master.
+        instance.add_tag('parent_component', cluster_component)
+        instance.add_tag('parent_instance', cluster_instance)
+        instance.add_tag('master', 'self')
+
+        return instance
+
+
+    def provision(self, cluster_component, cluster_instance, n_instances=1, master_instance=None, net_ifs=None):
+        """
+        Provision a number of EC2 instanced to be used in a cluster.
+
+        @param cluster_component  - parent cluster of each instance
+        @param cluster_instance   - instance id of the owning cluster
+        @param n_instances        - number of requested instances
+        @param master_instance    - if specified, the boto.ec2.instance.Instance that is providing master
+                                    services for this cluster
+
+        @return                   - list of boto.ec2.instance.Instances provisioned
+        """
+        instances = []
+        cluster_instance = int(cluster_instance)
+
+        # Tag a newly created instance so instances() can find it later.
+        def posess_instance(instance):
+            instances.append(instance)
+            instance.add_tag('parent_component', cluster_component)
+            instance.add_tag('parent_instance', cluster_instance)
+            if master_instance is not None:
+                instance.add_tag('master', master_instance.id)
+            else:
+                instance.add_tag('master', 'None')
+
+        user_data = "#cloud-config\n"
+        user_data += self.cloud_init_current_user()
+        user_data += "runcmd:\n"
+        user_data += self.cloud_init_yum_repos()
+
+        # Workers NFS-mount /home from the master when one exists.
+        if master_instance is not None:
+            user_data += " - echo '%s:/home /home nfs rw,soft,sync 0 0' >> /etc/fstab\n" % (
+                    master_instance.private_ip_address,)
+            user_data += " - mount /home\n"
+
+        # NOTE(review): this condition looks inverted (subnet_id is selected
+        # when interfaces WERE supplied), and 'kwds' is never passed to
+        # run_instances below, so the whole branch is dead code and the
+        # 'print net_ifs' in the else arm prints None. Confirm and fix/remove.
+        if net_ifs is not None:
+            kwds = {'subnet_id': __default_subnet__}
+        else:
+            kwds = {'network_interfaces': net_ifs}
+            print net_ifs
+
+        new_reservation = self._conn.run_instances(
+            image_id=self._ami,
+            min_count=n_instances,
+            max_count=n_instances,
+            instance_type=__default_instance_type__,
+            tenancy='default',
+            user_data=user_data,
+            network_interfaces=net_ifs)
+
+        _ = [posess_instance(i) for i in new_reservation.instances]
+
+        return instances
+
+    def stop(self, instance_id, free_resources=True):
+        """
+        Stop the specified instance, freeing all allocated resources (elastic ips, etc) if requested.
+
+        @param instance_id      - name of the instance to stop
+        @param free_resource    - If True that all resources that were only owned by this instance
+                                  will be deallocated as well.
+        """
+        # NOTE(review): despite the name/docstring this TERMINATES the
+        # instance (not a stop), and free_resources is ignored.
+        self._conn.terminate_instances(instance_ids=[instance_id,])
+
+    def fastpath111(self):
+        # Stands up the fixed "fastpath 1.1.1" topology: one master (cli),
+        # one mgmt node, two trafgen and three trafsink nodes, each wired to
+        # the subnets created below.
+        vpc_conn = boto.vpc.VPCConnection()
+        vpc = vpc_conn.get_all_vpcs(vpc_ids=[__default_vpc__,])[0]
+        subnet_addrs_split = vpc.cidr_block.split('.')
+
+        networks = {
+            'mgmt': [s for s in vpc_conn.get_all_subnets() if s.id == __default_subnet__][0],
+            'tg_fabric': None,
+            'ts_fabric': None,
+            'tg_lb_ext': None,
+            'lb_ts_ext': None,
+        }
+
+        # Create any subnet that does not exist yet; reuse on CIDR conflict.
+        for i, network in enumerate([n for n, s in networks.items() if s == None]):
+            addr = "%s.%s.10%d.0/25" % (subnet_addrs_split[0], subnet_addrs_split[1], i)
+            try:
+                subnet = vpc_conn.create_subnet(vpc.id, addr)
+            except boto.exception.EC2ResponseError, e:
+                if 'InvalidSubnet.Conflict' == e.error_code:
+                    subnet = vpc_conn.get_all_subnets(filters=[('vpcId', vpc.id), ('cidrBlock', addr)])[0]
+                else:
+                    raise
+
+            networks[network] = subnet
+
+        # Build one NIC spec per requested network, device-indexed in order.
+        def create_interfaces(nets):
+            ret = boto.ec2.networkinterface.NetworkInterfaceCollection()
+
+            for i, network in enumerate(nets):
+                spec = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+                        subnet_id=networks[network].id,
+                        description='%s iface' % (network,),
+                        groups=[__default_security_group__],
+                        device_index=i)
+                ret.append(spec)
+
+            return ret
+
+        ret = {}
+
+        ret['cli'] = self.provision_master('fp111', 1)
+        ret['cli'].add_tag('Name', 'cli')
+
+        net_ifs = create_interfaces(['mgmt'])
+        ret['mgmt'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['mgmt'].add_tag('Name', 'mgmt')
+
+        net_ifs = create_interfaces(['mgmt', 'tg_fabric'])
+        ret['tg1'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['tg1'].add_tag('Name', 'tg1')
+
+        net_ifs = create_interfaces(['mgmt', 'tg_fabric', 'tg_lb_ext'])
+        ret['tg2'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['tg2'].add_tag('Name', 'tg2')
+
+        net_ifs = create_interfaces(['mgmt', 'ts_fabric'])
+        ret['ts1'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['ts1'].add_tag('Name', 'ts1')
+
+        net_ifs = create_interfaces(['mgmt', 'ts_fabric', 'lb_ts_ext'])
+        ret['ts3'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['ts3'].add_tag('Name', 'ts3')
+
+        net_ifs = create_interfaces(['mgmt', 'ts_fabric', 'lb_ts_ext', 'tg_lb_ext'])
+        ret['ts2'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['ts2'].add_tag('Name', 'ts2')
+
+        return ret
+
+# vim: sw=4
diff --git a/rwcal/test/openstack_resources.py b/rwcal/test/openstack_resources.py
new file mode 100755
index 0000000..f7fb00d
--- /dev/null
+++ b/rwcal/test/openstack_resources.py
@@ -0,0 +1,483 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import logging
+import rw_peas
+import rwlogger
+import time
+import argparse
+import os
+import sys
+import uuid
+from os.path import basename
+
+# Flavor used for all VMs created by this script (created on demand if absent).
+FLAVOR_NAME = 'm1.medium'
+# Image uploaded/used when no --upload-image/--use-image option is given.
+DEFAULT_IMAGE='/net/sharedfiles/home1/common/vm/rift-root-latest.qcow2'
+
+# Resources that the cleanup paths (OpenstackResources._destroy_*) must never
+# delete, keyed by resource kind.
+persistent_resources = {
+    'vms'      : ['mission_control','launchpad',],
+    'networks' : ['public', 'private', 'multisite'],
+    'flavors'  : ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge'],
+    'images'   : ['rwimage','rift-root-latest.qcow2','rift-root-latest-trafgen.qcow2', 'rift-root-latest-trafgen-f.qcow2']
+}
+
+#
+# Important information about openstack installation. This needs to be manually verified 
+# (credentials, provider-network settings and the subnet pool consumed round
+# robin by OpenstackResources.create_network()).
+#
+openstack_info = {
+    'username'           : 'pluto',
+    'password'           : 'mypasswd',
+    'project_name'       : 'demo',
+    'mgmt_network'       : 'private',
+    'physical_network'   : 'physnet1',
+    'network_type'       : 'VLAN',
+    'segmentation_id'    : 42, ### What else?  Incremented after each network created.
+    'subnets'            : ["11.0.0.0/24", "12.0.0.0/24", "13.0.0.0/24", "14.0.0.0/24"],
+    'subnet_index'       : 0,
+    }
+
+
+logging.basicConfig(level=logging.INFO)
+
+# Launchpad userdata template installed under $RIFT_INSTALL/etc (see the
+# install(PROGRAMS etc/userdata-template ...) rule in rwcal/CMakeLists.txt).
+USERDATA_FILENAME = os.path.join(os.environ['RIFT_INSTALL'],
+                                 'etc/userdata-template')
+
+
+# Minimal cloud-init userdata used when no salt master is involved.
+RIFT_BASE_USERDATA = '''
+#cloud-config
+runcmd:
+ - sleep 5
+ - /usr/rift/scripts/cloud/enable_lab
+ - /usr/rift/etc/fix_this_vm
+'''
+
+# Read the launchpad userdata template once at import time.
+# NOTE(review): 'fd' is never closed, and the failure path exits the process
+# silently -- the logger call is commented out (module-level 'logger' is only
+# defined further below, so enabling it as-is would raise NameError).
+try:
+    fd = open(USERDATA_FILENAME, 'r')
+except Exception as e:
+    #logger.error("Received exception during opening of userdata (%s) file. Exception: %s" %(USERDATA_FILENAME, str(e)))
+    sys.exit(-1)
+else:
+    LP_USERDATA_FILE = fd.read()
+    # Run the enable lab script when the openstack vm comes up
+    LP_USERDATA_FILE += "runcmd:\n"
+    LP_USERDATA_FILE += " - /usr/rift/scripts/cloud/enable_lab\n"
+    LP_USERDATA_FILE += " - /usr/rift/etc/fix_this_vm\n"
+
+
+
+def get_cal_plugin():
+    """
+    Loads rw.cal plugin via libpeas.
+
+    Returns the plugin's "Cloud" interface on success, or None when
+    initialization fails (see NOTE below).
+    """
+    plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+    engine, info, extension = plugin()
+    cal = plugin.get_interface("Cloud")
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+    try:
+        rc = cal.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except:
+        # NOTE(review): bare except swallows the real failure and the
+        # function falls through to an implicit 'return None'; callers
+        # (OpenstackResources.__init__) do not check for None.
+        logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("Openstack Cal plugin successfully instantiated")
+        return cal 
+    
+def get_cal_account(auth_url):
+    """
+    Returns a CAL CloudAccount for the openstack backend, populated from
+    the module-level openstack_info dict plus the given keystone auth_url.
+    """
+    account                        = RwcalYang.CloudAccount()
+    account.account_type           = "openstack"
+    account.openstack.key          = openstack_info['username']
+    account.openstack.secret       = openstack_info['password']
+    account.openstack.auth_url     = auth_url
+    account.openstack.tenant       = openstack_info['project_name']
+    account.openstack.mgmt_network = openstack_info['mgmt_network']
+    return account
+
+
+logger = logging.getLogger('rift.cal.openstackresources')
+
+class OpenstackResources(object):
+    """
+    Helper that creates and destroys the openstack resources (VMs,
+    networks, flavors, images) used by the RIFT test environment, going
+    through the rwcal openstack plugin.
+    """
+    def __init__(self, controller):    
+        # 'controller' is the openstack controller host/IP; the keystone v3
+        # endpoint is assumed to listen on port 5000.
+        self._cal      = get_cal_plugin()
+        self._acct     = get_cal_account('http://'+controller+':5000/v3/')
+        self._id       = 0
+        self._image_id = None
+        self._flavor_id = None
+        
+    def _destroy_vms(self):
+        """
+        Destroy every VDU except those named in persistent_resources['vms'].
+        """
+        logger.info("Initiating VM cleanup")
+        rc, rsp = self._cal.get_vdu_list(self._acct)
+        vdu_list = [vm for vm in rsp.vdu_info_list if vm.name not in persistent_resources['vms']]
+        logger.info("Deleting VMs : %s" %([x.name for x in vdu_list]))
+        
+        for vdu in vdu_list:
+            self._cal.delete_vdu(self._acct, vdu.vdu_id)
+
+        logger.info("VM cleanup complete")
+
+    def _destroy_networks(self):
+        """
+        Destroy every virtual link except those named in
+        persistent_resources['networks'].
+        """
+        logger.info("Initiating Network cleanup")
+        rc, rsp = self._cal.get_virtual_link_list(self._acct)
+        vlink_list = [vlink for vlink in rsp.virtual_link_info_list if vlink.name not in persistent_resources['networks']]
+
+        logger.info("Deleting Networks : %s" %([x.name for x in vlink_list]))
+        for vlink in vlink_list:
+            self._cal.delete_virtual_link(self._acct, vlink.virtual_link_id)
+        logger.info("Network cleanup complete")
+
+    def _destroy_flavors(self):
+        """
+        Destroy every flavor except those named in
+        persistent_resources['flavors'].
+        """
+        logger.info("Initiating flavor cleanup")
+        rc, rsp = self._cal.get_flavor_list(self._acct)
+        flavor_list = [flavor for flavor in rsp.flavorinfo_list if flavor.name not in persistent_resources['flavors']]
+            
+        logger.info("Deleting flavors : %s" %([x.name for x in flavor_list]))
+
+        for flavor in flavor_list:
+            self._cal.delete_flavor(self._acct, flavor.id)
+            
+        logger.info("Flavor cleanup complete")
+
+    def _destroy_images(self):
+        # Destroy every image except those named in
+        # persistent_resources['images'].
+        logger.info("Initiating image cleanup")
+        rc, rsp = self._cal.get_image_list(self._acct)
+        image_list = [image for image in rsp.imageinfo_list if image.name not in persistent_resources['images']]
+
+        logger.info("Deleting images : %s" %([x.name for x in image_list]))
+            
+        for image in image_list:
+            self._cal.delete_image(self._acct, image.id)
+            
+        logger.info("Image cleanup complete")
+        
+    def destroy_resource(self):
+        """
+        Destroy all non-persistent resources: VMs, networks, flavors and
+        images, in that order.
+        """
+        logger.info("Cleaning up openstack resources")
+        self._destroy_vms()
+        self._destroy_networks()
+        self._destroy_flavors()
+        self._destroy_images()
+        logger.info("Cleaning up openstack resources.......[Done]")
+
+    def create_mission_control(self):
+        # Mission control always boots with the minimal RIFT userdata.
+        vm_id = self.create_vm('mission_control',
+                               userdata = RIFT_BASE_USERDATA)
+        return vm_id
+    
+
+    def create_launchpad_vm(self, salt_master=None):
+        # With a salt master address, boot from the launchpad userdata
+        # template (filled with the master IP and a fresh node id);
+        # otherwise fall back to the base userdata.
+        node_id = str(uuid.uuid4())
+        if salt_master is not None:
+           userdata = LP_USERDATA_FILE.format(master_ip = salt_master,
+                                           lxcname = node_id)
+        else:
+           userdata = RIFT_BASE_USERDATA
+
+        vm_id = self.create_vm('launchpad',
+                              userdata = userdata,
+                              node_id = node_id)
+#        vm_id = self.create_vm('launchpad2',
+#                               userdata = userdata,
+#                               node_id = node_id)
+        return vm_id
+    
+    def create_vm(self, name, userdata, node_id = None):
+        """
+        Create a VM named 'name' using the flavor/image ids previously
+        resolved via find_flavor()/find_image()/create_image().  A public
+        (floating) address is always requested.  Returns the new vdu id.
+        """
+        vm = RwcalYang.VDUInitParams()
+        vm.name = name
+        vm.flavor_id = self._flavor_id
+        vm.image_id  = self._image_id
+        if node_id is not None:
+            vm.node_id = node_id
+        vm.vdu_init.userdata = userdata
+        vm.allocate_public_address = True
+        logger.info("Starting a VM with parameter: %s" %(vm))
+     
+        rc, vm_id = self._cal.create_vdu(self._acct, vm)
+        assert rc == RwStatus.SUCCESS
+        logger.info('Created vm: %s with id: %s', name, vm_id)
+        return vm_id
+        
+    def create_network(self, name):
+        # Create a provider network on the next subnet from
+        # openstack_info['subnets'] (consumed round robin).
+        # NOTE(review): the wrap-around check below runs *before* the
+        # increment, so subnet_index can reach len(subnets) and the next
+        # call raises IndexError; compare against len(subnets) - 1 (or
+        # increment first, then wrap).
+        logger.info("Creating network with name: %s" %name)
+        network                = RwcalYang.NetworkInfoItem()
+        network.network_name   = name
+        network.subnet         = openstack_info['subnets'][openstack_info['subnet_index']]
+
+        if openstack_info['subnet_index'] == len(openstack_info['subnets']):
+            openstack_info['subnet_index'] = 0
+        else:
+            openstack_info['subnet_index'] += 1
+        
+        if openstack_info['physical_network']:
+            network.provider_network.physical_network = openstack_info['physical_network']
+        if openstack_info['network_type']:
+            network.provider_network.overlay_type     = openstack_info['network_type']
+        if openstack_info['segmentation_id']:
+            # Each network gets a unique VLAN segmentation id.
+            network.provider_network.segmentation_id  = openstack_info['segmentation_id']
+            openstack_info['segmentation_id'] += 1
+
+        rc, net_id = self._cal.create_network(self._acct, network)
+        assert rc == RwStatus.SUCCESS
+
+        logger.info("Successfully created network with id: %s" %net_id)
+        return net_id
+    
+        
+
+    def create_image(self, location):
+        # Upload a qcow2 image from 'location', then poll (100 tries, 2s
+        # apart) until it reaches 'active'; exits the process on failure.
+        # On success the image id is cached in self._image_id for later
+        # create_vm() calls.
+        img = RwcalYang.ImageInfoItem()
+        img.name = basename(location)
+        img.location = location
+        img.disk_format = "qcow2"
+        img.container_format = "bare"
+
+        logger.info("Uploading image : %s" %img.name)
+        rc, img_id = self._cal.create_image(self._acct, img)
+        assert rc == RwStatus.SUCCESS
+
+        rs = None
+        rc = None
+        image = None
+        for i in range(100):
+            rc, rs = self._cal.get_image(self._acct, img_id)
+            assert rc == RwStatus.SUCCESS
+            logger.info("Image (image_id: %s) reached status : %s" %(img_id, rs.state))
+            if rs.state == 'active':
+                image = rs
+                break
+            else:
+                time.sleep(2) # Sleep for two seconds between polls
+
+        if image is None:
+            logger.error("Failed to upload openstack image: %s", img)
+            sys.exit(1)
+
+        self._image_id = img_id
+        logger.info("Uploading image.......[Done]")
+        
+    def create_flavor(self):
+        """
+        Create the default flavor (FLAVOR_NAME: 4 vcpus, 16GB RAM, 20GB
+        disk) and return its id.
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name = FLAVOR_NAME
+        flavor.vm_flavor.memory_mb   = 16384 # 16GB
+        flavor.vm_flavor.vcpu_count  = 4 
+        flavor.vm_flavor.storage_gb  = 20 # 20 GB
+
+        logger.info("Creating new flavor. Flavor Info: %s" %str(flavor.vm_flavor))
+
+        rc, flavor_id = self._cal.create_flavor(self._acct, flavor)
+        assert rc == RwStatus.SUCCESS
+        logger.info("Creating new flavor.......[Done]")
+        return flavor_id
+
+    def find_image(self, name):
+        # Look up an already-uploaded image by name; caches its id in
+        # self._image_id.  Returns the id, or None when not found.
+        logger.info("Searching for uploaded image: %s" %name)
+        rc, rsp = self._cal.get_image_list(self._acct)
+        image_list = [image for image in rsp.imageinfo_list if image.name ==  name]
+
+        if not image_list:
+            logger.error("Image %s not found" %name)
+            return None
+
+        self._image_id = image_list[0].id
+        logger.info("Searching for uploaded image.......[Done]")
+        return self._image_id
+
+    def find_flavor(self, name=FLAVOR_NAME):
+        # Look up a flavor by name, creating the default one when missing;
+        # caches and returns the flavor id.
+        # NOTE(review): when a non-default 'name' is not found,
+        # create_flavor() still creates FLAVOR_NAME, not 'name'.
+        logger.info("Searching for required flavor: %s" %name)
+        rc, rsp = self._cal.get_flavor_list(self._acct)
+        flavor_list = [flavor for flavor in rsp.flavorinfo_list if flavor.name == name]
+
+        if not flavor_list:
+            logger.error("Flavor %s not found" %name)
+            self._flavor_id = self.create_flavor()
+        else:
+            self._flavor_id = flavor_list[0].id
+
+        logger.info("Searching for required flavor.......[Done]")
+        return self._flavor_id
+
+        
+    
+
+def main():
+    """
+    Main routine: parse arguments, then optionally clean up resources and
+    create images/flavors and the mission-control / launchpad VMs.
+    """
+    parser = argparse.ArgumentParser(description='Script to manage openstack resources')
+    
+    parser.add_argument('--controller',
+                        action = 'store',
+                        dest = 'controller',
+                        type = str,
+                        help='IP Address of openstack controller. This is mandatory parameter')
+
+    parser.add_argument('--cleanup',
+                        action = 'store',
+                        dest = 'cleanup',
+                        nargs = '+',
+                        type = str,
+                        help = 'Perform resource cleanup for openstack installation. \n Possible options are {all, flavors, vms, networks, images}')
+
+    parser.add_argument('--persist-vms',
+                        action = 'store',
+                        dest = 'persist_vms',
+                        help = 'VM instance name to persist')
+
+    parser.add_argument('--salt-master',
+                        action = 'store',
+                        dest = 'salt_master',
+                        type = str,
+                        help='IP Address of salt controller. Required, if VMs are being created.')
+
+    parser.add_argument('--upload-image',
+                        action = 'store',
+                        dest = 'upload_image',
+                        help='Openstack image location to upload and use when creating vms.x')
+
+    parser.add_argument('--use-image',
+                        action = 'store',
+                        dest = 'use_image',
+                        help='Image name to be used for VM creation')
+
+    parser.add_argument('--use-flavor',
+                        action = 'store',
+                        dest = 'use_flavor',
+                        help='Flavor name to be used for VM creation')
+    
+    parser.add_argument('--mission-control',
+                        action = 'store_true',
+                        dest = 'mission_control',
+                        help='Create Mission Control VM')
+
+
+    parser.add_argument('--launchpad',
+                        action = 'store_true',
+                        dest = 'launchpad',
+                        help='Create LaunchPad VM')
+
+    parser.add_argument('--use-project',
+                        action = 'store',
+                        dest = 'use_project',
+                        help='Project name to be used for VM creation')
+
+    parser.add_argument('--clean-mclp',
+                        action='store_true',
+                        dest='clean_mclp',
+                        help='Remove Mission Control and Launchpad VMs')
+
+    argument = parser.parse_args()
+
+    # Extend the list of VM names that cleanup must not delete
+    # (comma-separated value).
+    if argument.persist_vms is not None:
+        global persistent_resources
+        vm_name_list = argument.persist_vms.split(',')
+        for single_vm in vm_name_list:
+                persistent_resources['vms'].append(single_vm)
+        logger.info("persist-vms: %s" % persistent_resources['vms'])
+
+    # --clean-mclp makes even mission_control/launchpad eligible for cleanup.
+    if argument.clean_mclp:
+        persistent_resources['vms'] = []
+
+    # --controller is mandatory; argparse does not enforce it, so check here.
+    if argument.controller is None:
+        logger.error('Need openstack controller IP address')
+        sys.exit(-1)
+
+    
+    if argument.use_project is not None:
+        openstack_info['project_name'] = argument.use_project
+
+    ### Start processing
+    logger.info("Instantiating cloud-abstraction-layer")
+    drv = OpenstackResources(argument.controller)
+    logger.info("Instantiating cloud-abstraction-layer.......[Done]")
+
+        
+    # Cleanup first; 'all' short-circuits the per-resource options.
+    if argument.cleanup is not None:
+        for r_type in argument.cleanup:
+            if r_type == 'all':
+                drv.destroy_resource()
+                break
+            if r_type == 'images':
+                drv._destroy_images()
+            if r_type == 'flavors':
+                drv._destroy_flavors()
+            if r_type == 'vms':
+                drv._destroy_vms()
+            if r_type == 'networks':
+                drv._destroy_networks()
+
+    # Image selection: --upload-image takes precedence over --use-image;
+    # otherwise fall back to DEFAULT_IMAGE when a VM is to be created.
+    if argument.upload_image is not None:
+        image_name_list = argument.upload_image.split(',')
+        logger.info("Will upload %d image(s): %s" % (len(image_name_list), image_name_list))
+        for image_name in image_name_list:
+            drv.create_image(image_name)
+            #print("Uploaded :", image_name)
+
+    elif argument.use_image is not None:
+        img = drv.find_image(argument.use_image)
+        if img == None:
+            logger.error("Image: %s not found" %(argument.use_image))
+            sys.exit(-4)
+    else:
+        if argument.mission_control or argument.launchpad:
+            img = drv.find_image(basename(DEFAULT_IMAGE))
+            if img == None:
+                drv.create_image(DEFAULT_IMAGE)
+
+    # Resolve (or create) the flavor used for VM creation.
+    if argument.use_flavor is not None:
+        drv.find_flavor(argument.use_flavor)
+    else:
+        drv.find_flavor()
+        
+    if argument.mission_control == True:
+        drv.create_mission_control()
+
+    if argument.launchpad == True:
+        drv.create_launchpad_vm(salt_master = argument.salt_master)
+        
+    
+if __name__ == '__main__':
+    main()
+        
diff --git a/rwcal/test/rwcal_callback_gtest.cpp b/rwcal/test/rwcal_callback_gtest.cpp
new file mode 100644
index 0000000..52dc6f6
--- /dev/null
+++ b/rwcal/test/rwcal_callback_gtest.cpp
@@ -0,0 +1,79 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+#include <rwut.h>
+
+#include "rwcal-api.h"
+
+// Shared state mutated by the (currently disabled) closure callback so the
+// test can observe that the callback actually ran.
+struct test_struct {
+  int accessed;
+};
+
+struct test_struct g_test_struct;
+
+class RWCalCallbackTest : public ::testing::Test {
+  /*
+   * This is a tough one to test as we're really relying on the
+   * gobject introspection to do all the data marshalling for us
+   * correctly.  At this point, all I can think of to do is to
+   * just create a closure and then call it the same way it would
+   * typically be called in C and make sure that everything
+   * executed as expected.
+   */
+ protected:
+  rwcal_module_ptr_t rwcal;  // module under test, allocated per test case
+
+  // Allocate a fresh rwcal module and reset the shared callback state.
+  virtual void SetUp() {
+    rwcal = rwcal_module_alloc();
+    ASSERT_TRUE(rwcal);
+
+    g_test_struct.accessed = 0;
+  }
+
+  virtual void TearDown() {
+    rwcal_module_free(&rwcal);
+  }
+
+  // NOTE(review): the closure round-trip is compiled out with #if 0, so
+  // this currently only verifies that module allocation succeeded.
+  virtual void TestSuccess() {
+    ASSERT_TRUE(rwcal);
+#if 0
+    rwcal_closure_ptr_t closure;
+
+    closure = rwcal_closure_alloc(
+        rwcal,
+        &update_accessed,
+        (void *)&g_test_struct);
+    ASSERT_TRUE(closure);
+
+    ASSERT_EQ(g_test_struct.accessed, 0);
+    rw_cal_closure_callback(closure);
+    ASSERT_EQ(g_test_struct.accessed, 1);
+
+    rwcal_closure_free(&closure);
+    ASSERT_FALSE(closure);
+#endif
+  }
+};
+
+
+// gtest entry point: delegates to the fixture's TestSuccess() helper.
+TEST_F(RWCalCallbackTest, TestSuccess) {
+  TestSuccess();
+}
diff --git a/rwcal/test/rwcal_dump.cpp b/rwcal/test/rwcal_dump.cpp
new file mode 100644
index 0000000..ff6fd73
--- /dev/null
+++ b/rwcal/test/rwcal_dump.cpp
@@ -0,0 +1,77 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+/**
+ * @file cal_dump
+ * @author Jeremy Mordkoff
+ * @date 05/14/2015 
+ * @brief test program to dump what we can glean from an installation
+ */
+
+
+#include <limits.h>
+#include <cstdlib>
+#include <iostream>
+
+#include "rwcal-api.h"
+
+
+int main(int argc, char ** argv, char ** envp)
+{
+
+    // NOTE(review): the whole body is compiled out with #if 0, so the
+    // program currently does nothing and always returns 0.  The disabled
+    // code took <IP> <user> <password> arguments and dumped the flavor
+    // table of the targeted openstack installation.
+#if 0
+    rw_status_t status;
+    rwcal_module_ptr_t m_mod;
+    Rwcal__YangData__Rwcal__Flavorinfo__FlavorinfoList  *flavor;
+    rwpb_gi_Rwcal_FlavorInfo *flavors;
+    Rwcal__YangData__Rwcal__Flavorinfo *flavorinfo;
+    unsigned int i;
+    char url[128];
+
+    if (argc != 4 ) {
+    	fprintf(stderr, "args are IP user password\n");
+    	return(1);
+    }
+    snprintf(url, 128, "http://%s:35357/v2.0/tokens", argv[1] );
+
+    m_mod = rwcal_module_alloc();
+    status = rwcal_cloud_init(m_mod, RW_MANIFEST_RWCAL_CLOUD_TYPE_OPENSTACK_AUTH_URL, argv[2], argv[3], url );
+    if (status != RW_STATUS_SUCCESS)
+      return status;
+
+    status = rwcal_cloud_flavor_infos(m_mod, &flavors);
+    if (status != RW_STATUS_SUCCESS)
+      return status;
+    flavorinfo = flavors->s.message;
+    printf("ID                                       NAME             MEM    DISK VCPU PCI  HP TC\n");
+    printf("---------------------------------------- ---------------- ------ ---- ---- ---- -- --\n");
+    for (i = 0; i<flavorinfo->n_flavorinfo_list; i++) {
+      flavor = flavorinfo->flavorinfo_list[i];
+      printf("%-40s %-16s %6d %4d %4d %4d %2d %2d\n", flavor->id, flavor->name, flavor->memory, flavor->disk, flavor->vcpus, flavor->pci_passthru_bw, 
+              flavor->has_huge_pages, flavor->trusted_host_only );
+    }
+
+    rwcal__yang_data__rwcal__flavorinfo__gi_unref(flavors);
+#endif
+    return 0;
+
+}
+
diff --git a/rwcal/test/test_container_cal.py b/rwcal/test/test_container_cal.py
new file mode 100644
index 0000000..3ec5ca1
--- /dev/null
+++ b/rwcal/test/test_container_cal.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import logging
+import os
+import sys
+import time
+
+import rw_peas
+import rwlogger
+
+from gi.repository import RwcalYang
+
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.lxc as lxc
+
+logger = logging.getLogger('rift.cal')
+
+
+def main(argv=sys.argv[1:]):
+    """
+    Assuming that an LVM backing-store has been created with a volume group
+    called 'rift', the following creates an lxc 'image' and a pair of 'vms'.
+    In the LXC based container CAL, an 'image' is container and a 'vm' is a
+    snapshot of the original container.
+
+    In addition to the LVM backing store, it is assumed that there is a network
+    bridge called 'virbr0'.
+
+    """
+    logging.basicConfig(level=logging.DEBUG)
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--rootfs', '-r')
+    parser.add_argument('--num-vms', '-n', type=int, default=2)
+    parser.add_argument('--terminate', '-t', action='store_true')
+
+    args = parser.parse_args(argv)
+
+    # Acquire the plugin from peas
+    plugin = rw_peas.PeasPlugin('rwcal-plugin', 'RwCal-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+    cal = plugin.get_interface("Cloud")
+    cal.init(rwloggerctx)
+
+    # The account object is not currently used, but it is required by the CAL
+    # interface, so we create an empty object here to represent it.
+    account = RwcalYang.CloudAccount()
+    account.account_type = "lxc"
+
+    # Make sure that any containers that were previously created have been
+    # stopped and destroyed.
+    containers = lxc.containers()
+
+    for container in containers:
+        lxc.stop(container)
+
+    for container in containers:
+        lxc.destroy(container)
+
+    template = os.path.join(
+            os.environ['RIFT_INSTALL'],
+            'etc/lxc-fedora-rift.lxctemplate',
+            )
+
+    logger.info(template)
+    logger.info(args.rootfs)
+
+    # Create an image that can be used to create VMs
+    image = RwcalYang.ImageInfoItem()
+    image.name = 'rift-master'
+    image.lxc.size = '2.5G'
+    image.lxc.template_path = template
+    image.lxc.tarfile = args.rootfs
+
+    cal.create_image(account, image)
+
+    # Create a VM
+    # NOTE(review): vm.image_id / vm.vm_id / network.network_id appear to be
+    # filled in by the CAL calls mutating these objects in place -- confirm
+    # against the lxc CAL plugin implementation.
+    vms = []
+    for index in range(args.num_vms):
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = 'rift-s{}'.format(index + 1)
+        vm.image_id = image.id
+
+        cal.create_vm(account, vm)
+
+        vms.append(vm)
+
+    # Create the default and data networks
+    network = RwcalYang.NetworkInfoItem(network_name='virbr0')
+    cal.create_network(account, network)
+
+    # Debug output: show the bridge configuration.
+    os.system('/usr/sbin/brctl show')
+
+    # Create pairs of ports to connect the networks
+    for index, vm in enumerate(vms):
+        port = RwcalYang.PortInfoItem()
+        port.port_name = "eth0"
+        port.network_id = network.network_id
+        port.vm_id = vm.vm_id
+        port.ip_address = "192.168.122.{}".format(index + 101)
+        port.lxc.veth_name = "rws{}".format(index)
+
+        cal.create_port(account, port)
+
+    # Swap out the current instance of the plugin to test that the data is
+    # shared among different instances
+    # NOTE(review): this second init() is called without the rwloggerctx
+    # argument, unlike the first call above -- verify that the interface
+    # accepts a no-argument init.
+    cal = plugin.get_interface("Cloud")
+    cal.init()
+
+    # Start the VMs
+    for vm in vms:
+        cal.start_vm(account, vm.vm_id)
+
+    lxc.ls()
+
+    # Exit if the containers are not supposed to be terminated
+    if not args.terminate:
+        return
+
+    time.sleep(3)
+
+    # Stop the VMs
+    for vm in vms:
+        cal.stop_vm(account, vm.vm_id)
+
+    lxc.ls()
+
+    # Delete the VMs
+    for vm in vms:
+        cal.delete_vm(account, vm.vm_id)
+
+    # Delete the image
+    cal.delete_image(account, image.id)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/rwcal/test/test_openstack_install.py b/rwcal/test/test_openstack_install.py
new file mode 100644
index 0000000..0e4a61f
--- /dev/null
+++ b/rwcal/test/test_openstack_install.py
@@ -0,0 +1,567 @@
+"""
+#
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+#
+# @file test_openstack_install.py
+# @author Varun Prasad (varun.prasad@riftio.com)
+# @date 10/10/2015
+# @brief Test Openstack/os install
+#
+"""
+
+import logging
+import re
+import socket
+import sys
+import time
+import tempfile
+
+from keystoneclient.v3 import client
+import paramiko
+import pytest
+import requests
+import xmlrpc.client
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import rw_peas
+import rwlogger
+
+
+logger = logging.getLogger()
+logging.basicConfig(level=logging.INFO)
+
+
+class Host(object):
+    """A wrapper on top of a host, which provides a ssh connection instance.
+
+    Assumption:
+    The username/password for the VM is default.
+    """
+    # Default login credentials used for every test host.
+    _USERNAME = "root"
+    _PASSWORD = "riftIO"
+
+    def __init__(self, hostname):
+        """
+        Args:
+            hostname (str): Hostname (grunt3.qanet.riftio.com)
+        """
+        self.hostname = hostname
+        try:
+            self.ip = socket.gethostbyname(hostname)
+        except socket.gaierror:
+            # An unresolvable host makes the whole run pointless, so exit
+            # the interpreter rather than limp along.
+            logger.error("Unable to resolve the hostname {}".format(hostname))
+            sys.exit(1)
+
+        self.ssh = paramiko.SSHClient()
+        # Note: Do not load the system keys as the test will fail if the keys
+        # change.
+        self.ssh.set_missing_host_key_policy(paramiko.WarningPolicy())
+
+    def connect(self):
+        """Set up ssh connection.
+
+        Raises whatever paramiko raises on failure (auth/socket errors);
+        callers such as is_system_up() rely on that to detect a down host.
+        """
+        logger.debug("Trying to connect to {}: {}".format(
+                self.hostname,
+                self.ip))
+
+        self.ssh.connect(
+                self.ip,
+                username=self._USERNAME,
+                password=self._PASSWORD)
+
+    def put(self, content, dest):
+        """Creates a tempfile and puts it in the destination path in the HOST.
+        Args:
+            content (str): Content to be written to a file.
+            dest (str): Path to store the content.
+        """
+        # delete=False so the file survives close() until sftp reads it;
+        # NOTE(review): the temp file is never removed afterwards.
+        temp_file = tempfile.NamedTemporaryFile(delete=False)
+        temp_file.write(content.encode("UTF-8"))
+        temp_file.close()
+
+        logger.info("Writing {} file in {}".format(dest, self.hostname))
+        sftp = self.ssh.open_sftp()
+        sftp.put(temp_file.name, dest)
+        sftp.close()
+
+    def clear(self):
+        """Clean up
+        """
+        self.ssh.close()
+
+
+class Grunt(Host):
+    """A wrapper on top of grunt machine, provides functionalities to check
+    if the grunt is up, IP resolution.
+    """
+    @property
+    def grunt_name(self):
+        """Extract the grunt name from the FQDN
+
+        Returns:
+            str: e.g. grunt3 from grunt3.qanet.riftio.com
+        """
+        return self.hostname.split(".")[0]
+
+    @property
+    def dns_server(self):
+        """Hard-coded for now.
+        """
+        return "10.95.0.3"
+
+    @property
+    def floating_ip(self):
+        return "10.95.1.0"
+
+    @property
+    def private_ip(self):
+        """Construct the private IP from the grunt name. 10.0.xx.0 where xx is
+        value of the grunt (3 in case of grunt3)
+        """
+        host_part = re.sub(r"[a-zA-z]+", "", self.grunt_name)
+        return '10.0.{}.0'.format(host_part)
+
+    def is_system_up(self):
+        """Checks if system is up using ssh login.
+
+        Returns:
+            bool: Indicates if system is UP
+        """
+        try:
+            self.connect()
+        except OSError:
+            return False
+
+        return True
+
+    def wait_till_system_is_up(self, timeout=50, check_openstack=False):
+        """Blocking call to check if system is up.
+        Args:
+            timeout (int, optional): In mins(~).
+            check_openstack (bool, optional): If true will also check if
+                openstack is up and running on the system.
+
+        Raises:
+            OSError: If system start exceeds the timeout
+        """
+
+        TRY_DURATION = 20  # secs
+        total_tries = timeout * (60 / TRY_DURATION)  # 3 tries/mins i.e. 20 secs.
+        tries = 0
+
+        while tries < total_tries:
+            if self.is_system_up():
+                if check_openstack and self.is_openstack_up():
+                        return
+                elif not check_openstack:
+                    return
+
+            logger.info("{} down: Sleeping for {} secs. Try {} of {}".format(
+                    self.hostname,
+                    TRY_DURATION,
+                    tries,
+                    int(total_tries)))
+
+            time.sleep(TRY_DURATION)
+            tries += 1
+
+        raise OSError("Exception in system start {}({})".format(
+                self.hostname,
+                self.ip))
+
+    def is_openstack_up(self):
+        """Checks if openstack is UP, by verifying the URL.
+
+        Returns:
+            bool: Indicates if system is UP
+        """
+        url = "http://{}/dashboard/".format(self.ip)
+
+        logger.info("Checking if openstack({}) is UP".format(url))
+
+        try:
+            requests.get(url)
+        except requests.ConnectionError:
+            return False
+
+        return True
+
+
+class Cobbler(Host):
+    """A thin wrapper on cobbler and provides an interface using XML rpc client.
+
+    Assumption:
+    System instances are already added to cobbler(with ipmi). Adding instances
+    can also be automated, can be taken up sometime later.
+    """
+    def __init__(self, hostname, username="cobbler", password="cobbler"):
+        """
+        Args:
+            hostname (str): Cobbler host.
+            username (str, optional): username.
+            password (str, optional): password
+        """
+        super().__init__(hostname)
+
+        url = "https://{}/cobbler_api".format(hostname)
+
+        self.server = xmlrpc.client.ServerProxy(url)
+        logger.info("obtained a cobbler instance for the host {}".format(hostname))
+
+        # Token is passed back with every subsequent RPC call.
+        self.token = self.server.login(username, password)
+        # SSH connection is used by put() for snippet/kickstart uploads.
+        self.connect()
+
+    def create_profile(self, profile_name, ks_file):
+        """Create the profile for the system.
+
+        Args:
+            profile_name (str): Name of the profile.
+            ks_file (str): Path of the kick start file.
+        """
+        profile_attrs = {
+                "name": profile_name,
+                "kickstart": ks_file,
+                "repos": ['riftware', 'rift-misc', 'fc21-x86_64-updates',
+                          'fc21-x86_64', 'openstack-kilo'],
+                "owners": ["admin"],
+                "distro": "FC21.3-x86_64"
+                }
+
+        profile_id = self.server.new_profile(self.token)
+        for key, value in profile_attrs.items():
+            self.server.modify_profile(profile_id, key, value, self.token)
+        self.server.save_profile(profile_id, self.token)
+
+    def create_snippet(self, snippet_name, snippet_content):
+        """Unfortunately the XML rpc apis don't provide a direct interface to
+        create snippets, so falling back on the default sftp methods.
+
+        Args:
+            snippet_name (str): Name.
+            snippet_content (str): snippet's content.
+
+        Returns:
+            str: path where the snippet is stored
+        """
+        path = "/var/lib/cobbler/snippets/{}".format(snippet_name)
+        self.put(snippet_content, path)
+        return path
+
+    def create_kickstart(self, ks_name, ks_content):
+        """Creates and returns the path of the ks file.
+
+        Args:
+            ks_name (str): Name of the ks file to be saved.
+            ks_content (str): Content for ks file.
+
+        Returns:
+            str: path where the ks file is saved.
+        """
+        path = "/var/lib/cobbler/kickstarts/{}".format(ks_name)
+        self.put(ks_content, path)
+        return path
+
+    def boot_system(self, grunt, profile_name, false_boot=False):
+        """Boots the system with the profile specified. Also enable net-boot
+
+        Args:
+            grunt (Grunt): instance of grunt
+            profile_name (str): A valid profile name.
+            false_boot (bool, optional): debug only option.
+        """
+        if false_boot:
+            return
+
+        system_id = self.server.get_system_handle(
+                grunt.grunt_name,
+                self.token)
+        self.server.modify_system(
+                system_id,
+                "profile",
+                profile_name,
+                self.token)
+
+        # netboot_enabled makes the next PXE boot install the profile.
+        self.server.modify_system(
+                system_id,
+                "netboot_enabled",
+                "True",
+                self.token)
+        self.server.save_system(system_id, self.token)
+        # Power-cycle via IPMI (configured in cobbler) to start the install.
+        self.server.power_system(system_id, "reboot", self.token)
+
+
+class OpenstackTest(object):
+    """Driver class to automate the installation.
+    """
+    def __init__(
+            self,
+            cobbler,
+            controller,
+            compute_nodes=None,
+            test_prefix="openstack_test"):
+        """
+        Args:
+            cobbler (Cobbler): Instance of Cobbler
+            controller (Controller): Controller node instance
+            compute_nodes (TYPE, optional): A list of Grunt nodes to be set up
+                    as compute nodes.
+            test_prefix (str, optional): All entities created by the script are
+                    prefixed with this string.
+        """
+        self.cobbler = cobbler
+        self.controller = controller
+        self.compute_nodes = [] if compute_nodes is None else compute_nodes
+        self.test_prefix = test_prefix
+
+    def _prepare_snippet(self):
+        """Prepares the config based on the controller and compute nodes.
+
+        Returns:
+            str: Openstack config content.
+        """
+        content = ""
+
+        config = {}
+        config['host_name'] = self.controller.grunt_name
+        config['ip'] = self.controller.ip
+        config['dns_server'] = self.controller.dns_server
+        config['private_ip'] = self.controller.private_ip
+        config['floating_ip'] = self.controller.floating_ip
+
+        content += Template.GRUNT_CONFIG.format(**config)
+        # Compute nodes reuse the controller's addresses; only host_name
+        # changes per node (CONTROLLER= must point at the controller).
+        for compute_node in self.compute_nodes:
+            config["host_name"] = compute_node.grunt_name
+            content += Template.GRUNT_CONFIG.format(**config)
+
+        content = Template.SNIPPET_TEMPLATE.format(config=content)
+
+        return content
+
+    def prepare_profile(self):
+        """Creates the cobbler profile.
+
+        Uploads the snippet and kickstart file, then registers a profile
+        named after test_prefix that ties them together.
+        """
+        snippet_content = self._prepare_snippet()
+        self.cobbler.create_snippet(
+                "{}.cfg".format(self.test_prefix),
+                snippet_content)
+
+        ks_content = Template.KS_TEMPATE
+        ks_file = self.cobbler.create_kickstart(
+                "{}.ks".format(self.test_prefix),
+                ks_content)
+
+        self.cobbler.create_profile(self.test_prefix, ks_file)
+        return self.test_prefix
+
+    def _get_cal_account(self):
+        """
+        Creates an object for class RwcalYang.CloudAccount()
+        """
+        account                        = RwcalYang.CloudAccount()
+        account.account_type           = "openstack"
+        account.openstack.key          = "{}_user".format(self.test_prefix)
+        account.openstack.secret       = "mypasswd"
+        account.openstack.auth_url     = 'http://{}:35357/v3/'.format(self.controller.ip)
+        account.openstack.tenant       = self.test_prefix
+
+        return account
+
+    def start(self):
+        """Starts the installation.
+
+        Netboots the controller, waits for openstack to come up, then boots
+        each compute node; finally provisions a test project/user via the
+        keystone API and returns the CAL account for them.
+        """
+        profile_name = self.prepare_profile()
+
+        self.cobbler.boot_system(self.controller, profile_name)
+        self.controller.wait_till_system_is_up(check_openstack=True)
+
+        try:
+            logger.info("Controller system is UP. Setting up compute nodes")
+            for compute_node in self.compute_nodes:
+                self.cobbler.boot_system(compute_node, profile_name)
+                compute_node.wait_till_system_is_up()
+        except OSError as e:
+            logger.error("System set-up failed {}".format(e))
+            sys.exit(1)
+
+        # Currently we don't have wrapper on top of users/projects so using
+        # keystone API directly
+        acct = self._get_cal_account()
+
+        keystone_conn = client.Client(
+                auth_url=acct.openstack.auth_url,
+                username='admin',
+                password='mypasswd')
+
+        # Create a test project
+        project = keystone_conn.projects.create(
+                acct.openstack.tenant,
+                "default",
+                description="Openstack test project")
+
+        # Create an user
+        user = keystone_conn.users.create(
+                acct.openstack.key,
+                password=acct.openstack.secret,
+                default_project=project)
+
+        # Make the newly created user as ADMIN
+        admin_role = keystone_conn.roles.list(name="admin")[0]
+        keystone_conn.roles.grant(
+                admin_role.id,
+                user=user.id,
+                project=project.id)
+
+        # nova API needs to be restarted, otherwise the new service doesn't play
+        # well
+        self.controller.ssh.exec_command("source keystonerc_admin && "
+                "service openstack-nova-api restart")
+        time.sleep(10)
+
+        return acct
+
+    def clear(self):
+        """Close out all SFTP connections.
+        """
+        nodes = [self.controller]
+        nodes.extend(self.compute_nodes)
+        for node in nodes:
+            node.clear()
+
+
+###############################################################################
+## Begin pytests
+###############################################################################
+
+
+@pytest.fixture(scope="session")
+def cal(request):
+    """
+    Loads rw.cal plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+    cal = plugin.get_interface("Cloud")
+    try:
+        rc = cal.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except:
+        logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("Openstack Cal plugin successfully instantiated")
+
+    return cal
+
+
+@pytest.fixture(scope="session")
+def account(request):
+    """Creates an openstack instance with 1 compute node and returns the newly
+    created account.
+    """
+    # Host names are hard-coded lab machines; the full install runs once per
+    # pytest session.
+    cobbler = Cobbler("qacobbler.eng.riftio.com")
+    controller = Grunt("grunt3.qanet.riftio.com")
+    compute_nodes = [Grunt("grunt5.qanet.riftio.com")]
+
+    test = OpenstackTest(cobbler, controller, compute_nodes)
+    account = test.start()
+
+    # Close SSH/SFTP connections when the session ends.
+    request.addfinalizer(test.clear)
+    return account
+
+
+def test_list_images(cal, account):
+    """Verify if 2 images are present
+    """
+    # NOTE(review): assumes the fresh install registers exactly 2 images —
+    # confirm against the RDO/kickstart provisioning.
+    status, resources = cal.get_image_list(account)
+    assert len(resources.imageinfo_list) == 2
+
+def test_list_flavors(cal, account):
+    """Basic flavor checks
+    """
+    # NOTE(review): assumes the stock 5 default flavors exist — verify the
+    # installer does not add or remove any.
+    status, resources = cal.get_flavor_list(account)
+    assert len(resources.flavorinfo_list) == 5
+
+
+class Template(object):
+    """A container to hold all cobbler related templates.
+
+    All three attributes are str.format() templates; the literal content is
+    consumed verbatim by cobbler/anaconda, so do not edit it casually.
+    """
+    # Per-host case-branch for the generated shell snippet; the trailing ")"
+    # after {host_name} is the shell "case" pattern terminator.
+    GRUNT_CONFIG = """
+{host_name})
+    CONTROLLER={ip}
+    BRGIF=1
+    OVSDPDK=N
+    TRUSTED=N
+    QAT=N
+    HUGEPAGE=0
+    VLAN=10:14
+    PRIVATE_IP={private_ip}
+    FLOATING_IP={floating_ip}
+    DNS_SERVER={dns_server}
+    ;;
+
+    """
+
+    # Wraps the per-host branches in a shell "case $name in ... esac" block.
+    SNIPPET_TEMPLATE = """
+# =====================Begining of snippet=================
+# snippet openstack_test.cfg
+case $name in
+
+{config}
+
+*)
+    ;;
+esac
+
+# =====================End of snippet=================
+
+"""
+
+    # Kickstart body referencing cobbler $SNIPPET macros.
+    # NOTE(review): "KS_TEMPATE" is a misspelling of KS_TEMPLATE; renaming it
+    # requires updating OpenstackTest.prepare_profile() as well.
+    KS_TEMPATE = """
+$SNIPPET('rift-repos')
+$SNIPPET('rift-base')
+%packages
+@core
+wget
+$SNIPPET('rift-grunt-fc21-packages')
+ganglia-gmetad
+ganglia-gmond
+%end
+
+%pre
+$SNIPPET('log_ks_pre')
+$SNIPPET('kickstart_start')
+# Enable installation monitoring
+$SNIPPET('pre_anamon')
+%end
+
+%post --log=/root/ks_post.log
+$SNIPPET('openstack_test.cfg')
+$SNIPPET('ganglia')
+$SNIPPET('rift-post-yum')
+$SNIPPET('rift-post')
+$SNIPPET('rift_fix_grub')
+
+$SNIPPET('rdo-post')
+echo "banner RDO test" >> /etc/profile
+
+$SNIPPET('kickstart_done')
+%end
+"""
diff --git a/rwcal/test/test_rwcal_openstack.py b/rwcal/test/test_rwcal_openstack.py
new file mode 100644
index 0000000..4ce494b
--- /dev/null
+++ b/rwcal/test/test_rwcal_openstack.py
@@ -0,0 +1,1057 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import datetime
+import logging
+import time
+import unittest
+import hashlib
+
+import novaclient.exceptions as nova_exception
+import paramiko
+import rw_peas
+import rwlogger
+from keystoneclient import v3 as ksclient
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+from rift.rwcal.openstack.openstack_drv import KeystoneDriver, NovaDriver
+
+logger = logging.getLogger('rwcal-openstack')
+
+#
+# Important information about openstack installation. This needs to be manually verified 
+#
+openstack_info = {
+    'username'           : 'pluto',
+    'password'           : 'mypasswd',
+    'auth_url'           : 'http://10.66.4.14:5000/v3/',
+    'project_name'       : 'demo',
+    'mgmt_network'       : 'private',
+    'reserved_flavor'    : 'm1.medium',
+    'reserved_image'     : 'rift-root-latest.qcow2',
+    'physical_network'   : None,
+    'network_type'       : None,
+    'segmentation_id'    : None
+    }
+
+
+def get_cal_account():
+    """
+    Creates an object for class RwcalYang.CloudAccount()
+
+    All credentials come from the module-level openstack_info dict, which
+    must be manually kept in sync with the target installation.
+    """
+    account                        = RwcalYang.CloudAccount()
+    account.account_type           = "openstack"
+    account.openstack.key          = openstack_info['username']
+    account.openstack.secret       = openstack_info['password']
+    account.openstack.auth_url     = openstack_info['auth_url']
+    account.openstack.tenant       = openstack_info['project_name']
+    account.openstack.mgmt_network = openstack_info['mgmt_network']
+    return account
+
+def get_cal_plugin():
+    """
+    Loads rw.cal plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+    cal = plugin.get_interface("Cloud")
+    try:
+        rc = cal.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except:
+        logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("Openstack Cal plugin successfully instantiated")
+    return cal 
+
+
+class OpenStackTest(unittest.TestCase):
+    NodeID = "123456789012345" # Some random number to test VM tagging
+    # EPA attributes applied when creating the unit-test flavor.
+    MemoryPageSize = "LARGE"
+    CpuPolicy = "DEDICATED"
+    CpuThreadPolicy = "SEPARATE"
+    CpuThreads = 1
+    NumaNodeCount = 2
+    HostTrust = "trusted"
+    PCIPassThroughAlias = "PCI_10G_ALIAS"
+    # Segmentation id for provider networks (None unless configured above).
+    SEG_ID = openstack_info['segmentation_id']
+    
+    def setUp(self):
+        """
+        Assumption:
+         - It is assumed that openstack install has a flavor and image precreated.
+         - Flavor_name: openstack_info['reserved_flavor'] (m1.medium)
+         - Image_name : openstack_info['reserved_image'] (rift-root-latest.qcow2)
+
+        If these resources are not then this test will fail.
+        """
+        self._acct = get_cal_account()
+        logger.info("Openstack-CAL-Test: setUp")
+        self.cal   = get_cal_plugin()
+        logger.info("Openstack-CAL-Test: setUpEND")
+        
+        # First check for VM Flavor and Image and get the corresponding IDs
+        rc, rs = self.cal.get_flavor_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        flavor_list = [ flavor for flavor in rs.flavorinfo_list if flavor.name == openstack_info['reserved_flavor'] ]
+        self.assertNotEqual(len(flavor_list), 0)
+        self._flavor = flavor_list[0]
+
+        rc, rs = self.cal.get_image_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        image_list = [ image for image in rs.imageinfo_list if image.name == openstack_info['reserved_image'] ]
+        self.assertNotEqual(len(image_list), 0)
+        self._image = image_list[0]
+
+        # Clean up any networks left behind by earlier (failed) runs.
+        rc, rs = self.cal.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        networks = [ network for network in rs.networkinfo_list if (network.network_name == 'rift.cal.unittest.network' or network.network_name == 'rift.cal.virtual_link') ]
+        for network in networks:
+            self.cal.delete_virtual_link(self._acct, network.network_id)
+            
+    def tearDown(self):
+        # No per-test cleanup needed; tests delete what they create.
+        logger.info("Openstack-CAL-Test: tearDown")
+        
+
+    def _md5(fname, blksize=1048576):
+        hash_md5 = hashlib.md5()
+        with open(fname, "rb") as f:
+            for chunk in iter(lambda: f.read(blksize), b""):
+                hash_md5.update(chunk)
+        return hash_md5.hexdigest()
+
+    @unittest.skip("Skipping test_list_flavors")        
+    def test_list_flavor(self):
+        """
+        List existing flavors from openstack installation
+
+        Cross-checks each list entry against get_flavor() by id.
+        """
+        logger.info("Openstack-CAL-Test: Starting List Flavors Test")
+        rc, rsp = self.cal.get_flavor_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d flavors" %(len(rsp.flavorinfo_list)))
+        for flavor in rsp.flavorinfo_list:
+            rc, flv = self.cal.get_flavor(self._acct, flavor.id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            self.assertEqual(flavor.id, flv.id)
+            self.assertEqual(flavor.id, flv.id)
+        
+    @unittest.skip("Skipping test_list_images")                    
+    def test_list_images(self):
+        """
+        List existing images from openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting List Images Test")
+        rc, rsp = self.cal.get_image_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d images" %(len(rsp.imageinfo_list)))
+        # Per-image cross-check disabled; re-enable once get_image is stable.
+        #for image in rsp.imageinfo_list:
+        #    rc, img = self.cal.get_image(self._acct, image.id)
+        #    self.assertEqual(rc, RwStatus.SUCCESS)
+        #    self.assertEqual(image.id, img.id)
+        
+    @unittest.skip("Skipping test_list_vms")                
+    def test_list_vms(self):
+        """
+        List existing VMs from openstack installation
+
+        Cross-checks each list entry against get_vm() by vm_id.
+        """
+        logger.info("Openstack-CAL-Test: Starting List VMs Test")
+        rc, rsp = self.cal.get_vm_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d VMs" %(len(rsp.vminfo_list)))
+        for vm in rsp.vminfo_list:
+            rc, server = self.cal.get_vm(self._acct, vm.vm_id)
+            self.assertEqual(vm.vm_id, server.vm_id)
+            
+    @unittest.skip("Skipping test_list_networks")                            
+    def test_list_networks(self):
+        """
+        List existing Network from openstack installation
+
+        Cross-checks each list entry against get_network() by network_id.
+        """
+        logger.info("Openstack-CAL-Test: Starting List Networks Test")
+        rc, rsp = self.cal.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d Networks" %(len(rsp.networkinfo_list)))
+        for network in rsp.networkinfo_list:
+            rc, net = self.cal.get_network(self._acct, network.network_id)
+            self.assertEqual(network.network_id, net.network_id)
+        
+    @unittest.skip("Skipping test_list_ports")                                    
+    def test_list_ports(self):
+        """
+        List existing Ports from openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting List Ports Test")
+        rc, rsp = self.cal.get_port_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        assert(rc == RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d Ports" %(len(rsp.portinfo_list)))
+        for port in rsp.portinfo_list:
+            rc, p = self.cal.get_port(self._acct, port.port_id)
+            self.assertEqual(port.port_id, p.port_id)
+
+    def _get_image_info_request(self):
+        """
+        Returns request object of type RwcalYang.ImageInfoItem()
+
+        The checksum is computed locally so glance can validate the upload.
+        """
+        img = RwcalYang.ImageInfoItem()
+        img.name = "rift.cal.unittest.image"
+        # NFS-shared qcow2 image; assumes the share is mounted on the runner.
+        img.location = '/net/sharedfiles/home1/common/vm/rift-root-latest.qcow2'
+        img.disk_format = "qcow2"
+        img.container_format = "bare"
+        img.checksum = self._md5(img.location)
+        return img
+
+    def _get_image_info(self, img_id):
+        """
+        Checks the image status until it becomes active or timeout occurs
+        (100 polls x 2s sleep, i.e. roughly 200 sec worst case)
+        Returns the image_info dictionary (last polled state on timeout)
+        """
+        rs = None
+        rc = None
+        for i in range(100):
+            rc, rs = self.cal.get_image(self._acct, img_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: Image (image_id: %s) reached state : %s" %(img_id, rs.state))
+            if rs.state == 'active':
+                break
+            else:
+                time.sleep(2) # Sleep for two seconds between polls
+        return rs
+    
+    @unittest.skip("Skipping test_create_delete_image")                            
+    def test_create_delete_image(self):
+        """
+        Create/Query/Delete a new image in openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting Image create test")
+        img = self._get_image_info_request()
+        rc, img_id = self.cal.create_image(self._acct, img)
+        logger.info("Openstack-CAL-Test: Created Image with image_id: %s" %(img_id))
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        # Poll until glance marks the image active.
+        img_info = self._get_image_info(img_id)
+        self.assertNotEqual(img_info, None)
+        self.assertEqual(img_id, img_info.id)
+        logger.info("Openstack-CAL-Test: Image (image_id: %s) reached state : %s" %(img_id, img_info.state))
+        self.assertEqual(img_info.has_field('checksum'), True)
+        #self.assertEqual(img_info.checksum, OpenStackTest.IMG_Checksum)
+        logger.info("Openstack-CAL-Test: Initiating Delete Image operation for image_id: %s" %(img_id))
+        rc = self.cal.delete_image(self._acct, img_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Image (image_id: %s) successfully deleted" %(img_id))
+
+    def _get_flavor_info_request(self):
+        """
+        Returns request object of type RwcalYang.FlavorInfoItem()
+
+        Builds a flavor with the class-level EPA attributes: huge pages,
+        dedicated/pinned CPUs, 2 NUMA nodes and one PCI passthrough device.
+        """
+        flavor                                     = RwcalYang.FlavorInfoItem()
+        flavor.name                                = 'rift.cal.unittest.flavor'
+        flavor.vm_flavor.memory_mb                 = 16384 # 16GB
+        flavor.vm_flavor.vcpu_count                = 4 
+        flavor.vm_flavor.storage_gb                = 40 # 40GB
+        flavor.guest_epa.mempage_size              = OpenStackTest.MemoryPageSize
+        flavor.guest_epa.cpu_pinning_policy        = OpenStackTest.CpuPolicy
+        flavor.guest_epa.cpu_thread_pinning_policy = OpenStackTest.CpuThreadPolicy
+        flavor.guest_epa.numa_node_policy.node_cnt = OpenStackTest.NumaNodeCount
+        # Split the 4 vcpus evenly across the two NUMA nodes.
+        for i in range(OpenStackTest.NumaNodeCount):
+            node = flavor.guest_epa.numa_node_policy.node.add()
+            node.id = i
+            if i == 0:
+                node.vcpu = [0,1]
+            elif i == 1:
+                node.vcpu = [2,3]
+            node.memory_mb = 8196  # NOTE(review): 8196 looks like a typo for 8192 (2x8192=16384) — confirm
+        dev = flavor.guest_epa.pcie_device.add()
+        dev.device_id = OpenStackTest.PCIPassThroughAlias
+        dev.count = 1
+        return flavor
+        
+    @unittest.skip("Skipping test_create_delete_flavor")                            
+    def test_create_delete_flavor(self):
+        """
+        Create/Query/Delete a new flavor in openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting Image create/delete test")
+
+        ### Delete any previously created flavor with name rift.cal.unittest.flavor
+        rc, rs = self.cal.get_flavor_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        flavor_list = [ flavor for flavor in rs.flavorinfo_list if flavor.name == 'rift.cal.unittest.flavor' ]
+        if flavor_list:
+            rc = self.cal.delete_flavor(self._acct, flavor_list[0].id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+        
+        flavor = self._get_flavor_info_request()
+        rc, flavor_id = self.cal.create_flavor(self._acct, flavor)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        
+        logger.info("Openstack-CAL-Test: Created new flavor with flavor_id : %s" %(flavor_id))
+        rc, rs = self.cal.get_flavor(self._acct, flavor_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.id, flavor_id)
+
+        # Verify EPA Attributes
+        self.assertEqual(rs.guest_epa.mempage_size, OpenStackTest.MemoryPageSize)
+        self.assertEqual(rs.guest_epa.cpu_pinning_policy, OpenStackTest.CpuPolicy)
+        self.assertEqual(rs.guest_epa.cpu_thread_pinning_policy, OpenStackTest.CpuThreadPolicy)
+        self.assertEqual(rs.guest_epa.numa_node_policy.node_cnt, OpenStackTest.NumaNodeCount)
+        self.assertEqual(len(rs.guest_epa.pcie_device), 1)
+        self.assertEqual(rs.guest_epa.pcie_device[0].device_id, OpenStackTest.PCIPassThroughAlias)
+        self.assertEqual(rs.guest_epa.pcie_device[0].count, 1)
+        logger.info("Openstack-CAL-Test: Initiating delete for flavor_id : %s" %(flavor_id))
+        rc = self.cal.delete_flavor(self._acct, flavor_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        # Check that flavor does not exist anymore in list_flavor
+        rc, rs = self.cal.get_flavor_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        flavor_list = [ flavor for flavor in rs.flavorinfo_list if flavor.id == flavor_id ]
+        # Flavor List should be empty
+        self.assertEqual(len(flavor_list), 0)
+        logger.info("Openstack-CAL-Test: Flavor (flavor_id: %s) successfully deleted" %(flavor_id))
+
+    def _get_vm_info_request(self, flavor_id, image_id):
+        """
+        Returns request object of type RwcalYang.VMInfoItem
+
+        Args:
+            flavor_id: id of an existing nova flavor.
+            image_id: id of an existing glance image.
+        """
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = 'rift.cal.unittest.vm'
+        vm.flavor_id = flavor_id
+        vm.image_id  = image_id
+        vm.cloud_init.userdata = ''
+        # Tag the VM so tests can verify user-tag round-tripping.
+        vm.user_tags.node_id  = OpenStackTest.NodeID
+        return vm
+
+    def _check_vm_state(self, vm_id, expected_state):
+        """
+        Wait until VM reaches particular state (expected_state). 
+        """
+        # Wait while VM goes to required state
+
+        for i in range(50): # 50 poll iterations...
+            rc, rs = self.cal.get_vm(self._acct, vm_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+            if rs.state == expected_state:
+                break
+            else:
+                time.sleep(1)
+
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.state, expected_state)
+
+    def _create_vm(self, flavor, image, port_list = None):
+        """
+        Create VM and perform validity checks
+        """
+        logger.info("Openstack-CAL-Test: Using image : %s and flavor : %s " %(image.name, flavor.name))
+        vm = self._get_vm_info_request(flavor.id, image.id)
+
+        if port_list:
+            for port_id in port_list:
+                port = vm.port_list.add()
+                port.port_id = port_id 
+
+        rc, vm_id = self.cal.create_vm(self._acct, vm)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Check if VM creation is successful
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Successfully created VM with vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+
+        ### Ensure the VM state is active
+        self._check_vm_state(vm_id, 'ACTIVE')
+
+        ### Ensure that userdata tags are set as expected
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.user_tags.has_field('node_id'), True)
+        self.assertEqual(getattr(rs.user_tags, 'node_id'), OpenStackTest.NodeID)
+        logger.info("Openstack-CAL-Test: Successfully verified the user tags for VM-ID: %s" %(vm_id))
+        return rs, vm_id
+
+    def _delete_vm(self, vm_id):
+        """
+        Delete VM and perform validity checks
+        """
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        logger.info("Openstack-CAL-Test: Initiating VM Delete operation on VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+
+        rc = self.cal.delete_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        for i in range(50):
+            # Check if VM still exists
+            rc, rs = self.cal.get_vm_list(self._acct)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            vm_list = [vm for vm in rs.vminfo_list if vm.vm_id == vm_id]
+            if not len(vm_list):
+                break
+        
+        rc, rs = self.cal.get_vm_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        vm_list = [vm for vm in rs.vminfo_list if vm.vm_id == vm_id]
+        self.assertEqual(len(vm_list), 0)
+        logger.info("Openstack-CAL-Test: VM with vm_id : %s successfully deleted" %(vm_id))
+
+    def _stop_vm(self, vm_id):
+        """
+        Stop VM and perform validity checks
+        """
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Initiating Stop VM operation on VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+        rc = self.cal.stop_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        ### Ensure that VM state is SHUTOFF
+        self._check_vm_state(vm_id, 'SHUTOFF')
+        
+        
+    def _start_vm(self, vm_id):
+        """
+        Starts VM and performs validity checks
+        """
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Initiating Start VM operation on VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+        rc = self.cal.start_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Ensure that VM state is ACTIVE
+        self._check_vm_state(vm_id, 'ACTIVE')
+
+        
+    def _reboot_vm(self, vm_id):
+        """
+        Reboot VM and perform validity checks
+        """
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Initiating Reboot VM operation on VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+        rc = self.cal.reboot_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Ensure that VM state is ACTIVE
+        self._check_vm_state(vm_id, 'ACTIVE')
+
+    def assert_vm(self, vm_data, flavor):
+        """Verify the newly created VM for attributes specified in the flavor.
+
+        Args:
+            vm_data (VmData): Instance of the newly created VM
+            flavor (FlavorInfoItem): Config flavor.
+        """
+        vm_config = flavor
+
+        # Page size seems to be 4096, regardless of the page size name.
+        page_lookup = {"large": '4096', "small": '4096'}
+        FIELDS = ["vcpus", "cpu_threads", "memory_page_size", "disk",
+                  "numa_node_count", "memory", "pci_passthrough_device_list"]
+
+        for field in FIELDS:
+            if field not in vm_config:
+                continue
+
+            vm_value = getattr(vm_data, field)
+            config_value = getattr(vm_config, field)
+
+            if field == "memory_page_size":
+                config_value = page_lookup[config_value]
+
+            if field == "memory":
+                config_value = int(config_value/1000)
+
+            if field == "pci_passthrough_device_list":
+                config_value = len(config_value)
+                vm_value = len(vm_value)
+
+            self.assertEqual(vm_value, config_value)
+
+    @unittest.skip("Skipping test_vm_epa_attributes")
+    def test_vm_epa_attributes(self):
+        """
+        Primary goal: To create a VM with the specified EPA Attributes
+        Secondary goal: To verify flavor creation/delete
+        """
+
+        logger.info("Openstack-CAL-Test: Starting VM(EPA) create/delete test")
+        flavor = self._get_flavor_info_request()
+   
+        rc, flavor_id = self.cal.do_create_flavor(self._acct, flavor)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        flavor.id = flavor_id
+
+        data, vm_id = self._create_vm(flavor, self._image)
+
+        vm_data = VmData(data.host_name, data.management_ip)
+        self.assert_vm(vm_data, flavor)
+
+        self._delete_vm(vm_id)
+
+        rc = self.cal.do_delete_flavor(self._acct, flavor_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
    @unittest.skip("Skipping test_expiry_token")
    def test_expiry_token(self):
        """
        Primary goal: To verify if we are refreshing the expired tokens.
        """
        logger.info("Openstack-CAL-Test: Starting token refresh test")
        # Build a keystone driver directly from the test credentials.
        drv = KeystoneDriver(
                openstack_info['username'],
                openstack_info['password'],
                openstack_info['auth_url'],
                openstack_info['project_name'])
        # Get hold of the client instance need for Token Manager
        client = drv._get_keystone_connection()

        auth_ref = client.auth_ref
        token = auth_ref['auth_token']

        # Verify if the newly acquired token works.
        nova = NovaDriver(drv)
        flavors = nova.flavor_list()
        self.assertTrue(len(flavors) > 1)

        # Invalidate the token
        token_manger = ksclient.tokens.TokenManager(client)
        token_manger.revoke_token(token)

        # Give keystone time to propagate the revocation.
        time.sleep(10)

        # The revoked token must now be rejected by nova; record whether
        # the expected authorization failure was raised.
        unauth_exp = False
        try:
            flavors = nova.flavor_list()
            print (flavors)
        except nova_exception.AuthorizationFailure:
            unauth_exp = True

        self.assertTrue(unauth_exp)

        # Explicitly reset the expire time, to test if we acquire a new token
        now = datetime.datetime.utcnow()
        time_str = format(now, "%Y-%m-%dT%H:%M:%S.%fZ")
        drv._get_keystone_connection().auth_ref['expires_at'] = time_str

        # The driver should transparently fetch a fresh token and succeed.
        flavors = nova.flavor_list()
        self.assertTrue(len(flavors) > 1)
+
+    @unittest.skip("Skipping test_vm_operations")                            
+    def test_vm_operations(self):
+        """
+        Primary goal: Create/Query/Delete VM in openstack installation.
+        Secondary goal: VM pause/resume operations on VM.
+
+        """
+        logger.info("Openstack-CAL-Test: Starting VM Operations test")
+
+        # Create VM
+        data, vm_id = self._create_vm(self._flavor, self._image)
+
+        # Stop VM
+        self._stop_vm(vm_id)
+        # Start VM
+        self._start_vm(vm_id)
+
+        vm_data = VmData(data.host_name, data.management_ip)
+        self.assert_vm(vm_data, self._flavor)
+
+        # Reboot VM
+        self._reboot_vm(vm_id)
+        ### Delete the VM
+        self._delete_vm(vm_id)
+
+        
+    def _get_network_info_request(self):
+        """
+        Returns request object of type RwcalYang.NetworkInfoItem
+        """
+        network                            = RwcalYang.NetworkInfoItem()
+        network.network_name               = 'rift.cal.unittest.network'
+        network.subnet                     = '192.168.16.0/24'
+        if openstack_info['physical_network']:
+            network.provider_network.physical_network = openstack_info['physical_network']
+        if openstack_info['network_type']:
+            network.provider_network.overlay_type     = openstack_info['network_type']
+        if OpenStackTest.SEG_ID:
+            network.provider_network.segmentation_id  = OpenStackTest.SEG_ID
+            OpenStackTest.SEG_ID += 1
+        return network
+
+
+    def _create_network(self):
+        """
+        Create a network and verify that network creation is successful
+        """
+        network = self._get_network_info_request()
+
+        ### Create network
+        logger.info("Openstack-CAL-Test: Creating a network with name : %s" %(network.network_name))
+        rc, net_id = self.cal.create_network(self._acct, network)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Verify network is created successfully
+        rc, rs = self.cal.get_network(self._acct, net_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Successfully create Network : %s  with id : %s." %(network.network_name, net_id ))
+
+        return net_id
+
+    def _delete_network(self, net_id):
+        """
+        Delete network and verify that delete operation is successful
+        """
+        rc, rs = self.cal.get_network(self._acct, net_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        logger.info("Openstack-CAL-Test: Deleting a network with id : %s. " %(net_id))
+        rc = self.cal.delete_network(self._acct, net_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        
+        # Verify that network is no longer available via get_network_list API
+        rc, rs = self.cal.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        network_info = [ network for network in rs.networkinfo_list if network.network_id == net_id ]
+        self.assertEqual(len(network_info), 0)
+        logger.info("Openstack-CAL-Test: Successfully deleted Network with id : %s" %(net_id))
+        
+        
+    @unittest.skip("Skipping test_network_operations")                            
+    def test_network_operations(self):
+        """
+        Create/Delete Networks
+        """
+        logger.info("Openstack-CAL-Test: Starting Network Operation test")
+
+        ### Create Network
+        net_id = self._create_network()
+
+        ### Delete Network
+        self._delete_network(net_id)
+
+    def _get_port_info_request(self, network_id, vm_id):
+        """
+        Returns an object of type RwcalYang.PortInfoItem
+        """
+        port = RwcalYang.PortInfoItem()
+        port.port_name = 'rift.cal.unittest.port'
+        port.network_id = network_id
+        if vm_id != None:
+            port.vm_id = vm_id
+        return port
+
+    def _create_port(self, net_id, vm_id = None):
+        """
+        Create a port in network with network_id: net_id and verifies that operation is successful
+        """
+        if vm_id != None:
+            logger.info("Openstack-CAL-Test: Creating a port in network with network_id: %s and VM with vm_id: %s" %(net_id, vm_id))
+        else:
+            logger.info("Openstack-CAL-Test: Creating a port in network with network_id: %s" %(net_id))
+
+        ### Create Port
+        port = self._get_port_info_request(net_id, vm_id)
+        rc, port_id = self.cal.create_port(self._acct, port)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Get Port
+        rc, rs = self.cal.get_port(self._acct, port_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Successfully create Port with id : %s. Port State :  %s" %(port_id, rs.port_state))
+
+        return port_id
+
+    def _delete_port(self, port_id):
+        """
+        Deletes a port and verifies that operation is successful
+        """
+        rc, rs = self.cal.get_port(self._acct, port_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Deleting Port with id : %s. Port State :  %s" %(port_id, rs.port_state))
+
+        ### Delete Port
+        self.cal.delete_port(self._acct, port_id)
+        
+        rc, rs = self.cal.get_port_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        port_list = [ port for port in rs.portinfo_list if port.port_id == port_id ]
+        self.assertEqual(len(port_list), 0)
+        logger.info("Openstack-CAL-Test: Successfully Deleted Port with id : %s" %(port_id))
+
+    def _monitor_port(self, port_id, expected_state):
+        """
+        Monitor the port state until it reaches expected_state
+        """
+        for i in range(50):
+            rc, rs = self.cal.get_port(self._acct, port_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: Port with id : %s. Port State :  %s" %(port_id, rs.port_state))
+            if rs.port_state == expected_state:
+                break
+        rc, rs = self.cal.get_port(self._acct, port_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.port_state, expected_state)
+        logger.info("Openstack-CAL-Test: Port with port_id : %s reached expected state  : %s" %(port_id, rs.port_state))
+            
+    @unittest.skip("Skipping test_port_operations_with_vm")
+    def test_port_operations_with_vm(self):
+        """
+        Create/Delete Ports in a network and associate it with a VM
+        """
+        logger.info("Openstack-CAL-Test: Starting Port Operation test with VM")
+
+        ### First create a network
+        net_id = self._create_network()
+
+        ### Create a VM
+        data, vm_id = self._create_vm(self._flavor, self._image)
+
+        ### Now create Port which connects VM to Network
+        port_id = self._create_port(net_id, vm_id)
+
+        ### Verify that port goes to active state
+        self._monitor_port(port_id, 'ACTIVE')
+
+        ### Delete VM
+        self._delete_vm(vm_id)
+        
+        ### Delete Port
+        self._delete_port(port_id)
+
+        ### Delete the network
+        self._delete_network(net_id)
+
+    @unittest.skip("Skipping test_create_vm_with_port")
+    def test_create_vm_with_port(self):
+        """
+        Create VM and add ports to it during boot time.
+        """
+        logger.info("Openstack-CAL-Test: Starting Create VM with port test")
+
+        ### First create a network
+        net_id = self._create_network()
+
+        ### Now create Port which connects VM to Network
+        port_id = self._create_port(net_id)
+
+        ### Create a VM
+        data, vm_id = self._create_vm(self._flavor, self._image, [port_id])
+
+        ### Verify that port goes to active state
+        self._monitor_port(port_id, 'ACTIVE')
+
+        ### Delete VM
+        self._delete_vm(vm_id)
+        
+        ### Delete Port
+        self._delete_port(port_id)
+
+        ### Delete the network
+        self._delete_network(net_id)
+
+    @unittest.skip("Skipping test_get_vdu_list")
+    def test_get_vdu_list(self):
+        """
+        Test the get_vdu_list API
+        """
+        logger.info("Openstack-CAL-Test: Test Get VDU List APIs")
+        rc, rsp = self.cal.get_vdu_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d VDUs" %(len(rsp.vdu_info_list)))
+        for vdu in rsp.vdu_info_list:
+            rc, vdu2 = self.cal.get_vdu(self._acct, vdu.vdu_id)
+            self.assertEqual(vdu2.vdu_id, vdu.vdu_id)
+
+
+    @unittest.skip("Skipping test_get_virtual_link_list")
+    def test_get_virtual_link_list(self):
+        """
+        Test the get_virtual_link_list API
+        """
+        logger.info("Openstack-CAL-Test: Test Get virtual_link List APIs")
+        rc, rsp = self.cal.get_virtual_link_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d virtual_links" %(len(rsp.virtual_link_info_list)))
+        for virtual_link in rsp.virtual_link_info_list:
+            rc, virtual_link2 = self.cal.get_virtual_link(self._acct, virtual_link.virtual_link_id)
+            self.assertEqual(virtual_link2.virtual_link_id, virtual_link.virtual_link_id)
+
+    def _get_virtual_link_request_info(self):
+        """
+        Returns object of type RwcalYang.VirtualLinkReqParams
+        """
+        vlink = RwcalYang.VirtualLinkReqParams()
+        vlink.name = 'rift.cal.virtual_link'
+        vlink.subnet = '192.168.1.0/24'
+        if openstack_info['physical_network']:
+            vlink.provider_network.physical_network = openstack_info['physical_network']
+        if openstack_info['network_type']:
+            vlink.provider_network.overlay_type     = openstack_info['network_type'].upper()
+        if OpenStackTest.SEG_ID:
+            vlink.provider_network.segmentation_id  = OpenStackTest.SEG_ID
+            OpenStackTest.SEG_ID += 1
+        return vlink
+        
+    def _get_vdu_request_info(self, virtual_link_id):
+        """
+        Returns object of type RwcalYang.VDUInitParams
+        """
+        vdu = RwcalYang.VDUInitParams()
+        vdu.name = "cal.vdu"
+        vdu.node_id = OpenStackTest.NodeID
+        vdu.image_id = self._image.id
+        vdu.flavor_id = self._flavor.id
+        vdu.vdu_init.userdata = ''
+        vdu.allocate_public_address = True
+        c1 = vdu.connection_points.add()
+        c1.name = "c_point1"
+        c1.virtual_link_id = virtual_link_id
+        c1.type_yang = 'VIRTIO'
+        return vdu
+
+    def _get_vdu_modify_request_info(self, vdu_id, virtual_link_id):
+        """
+        Returns object of type RwcalYang.VDUModifyParams
+        """
+        vdu = RwcalYang.VDUModifyParams()
+        vdu.vdu_id = vdu_id
+        c1 = vdu.connection_points_add.add()
+        c1.name = "c_modify1"
+        c1.virtual_link_id = virtual_link_id
+       
+        return vdu 
+        
+    #@unittest.skip("Skipping test_create_delete_virtual_link_and_vdu")
+    def test_create_delete_virtual_link_and_vdu(self):
+        """
+        Test to create VDU
+        """
+        logger.info("Openstack-CAL-Test: Test Create Virtual Link API")
+        vlink_req = self._get_virtual_link_request_info()
+
+        rc, rsp = self.cal.create_virtual_link(self._acct, vlink_req)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
+        vlink_id = rsp
+        
+        #Check if virtual_link create is successful
+        rc, rsp = self.cal.get_virtual_link(self._acct, rsp)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rsp.virtual_link_id, vlink_id)
+
+        # Now create VDU
+        vdu_req = self._get_vdu_request_info(vlink_id)
+        logger.info("Openstack-CAL-Test: Test Create VDU API")
+
+        rc, rsp = self.cal.create_vdu(self._acct, vdu_req)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Created vdu with Id: %s" %rsp)
+
+        vdu_id = rsp
+
+        ## Check if VDU create is successful
+        rc, rsp = self.cal.get_vdu(self._acct, rsp)
+        self.assertEqual(rsp.vdu_id, vdu_id)
+
+        ### Wait until vdu_state is active
+        for i in range(50):
+            rc, rs = self.cal.get_vdu(self._acct, vdu_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: VDU with id : %s. Reached State :  %s" %(vdu_id, rs.state))
+            if rs.state == 'active':
+                break
+        rc, rs = self.cal.get_vdu(self._acct, vdu_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.state, 'active')
+        logger.info("Openstack-CAL-Test: VDU with id : %s reached expected state  : %s" %(vdu_id, rs.state))
+        logger.info("Openstack-CAL-Test: VDUInfo: %s" %(rs))
+        
+        vlink_req = self._get_virtual_link_request_info()
+
+        ### Create another virtual_link
+        rc, rsp = self.cal.create_virtual_link(self._acct, vlink_req)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
+        vlink_id2= rsp
+
+        ### Now exercise the modify_vdu_api
+        vdu_modify = self._get_vdu_modify_request_info(vdu_id, vlink_id2)
+        rc = self.cal.modify_vdu(self._acct, vdu_modify)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Modified vdu with Id: %s" %vdu_id)
+
+        ### Lets delete the VDU
+        self.cal.delete_vdu(self._acct, vdu_id)
+
+        ### Lets delete the Virtual Link
+        self.cal.delete_virtual_link(self._acct, vlink_id)
+
+        ### Lets delete the Virtual Link-2
+        self.cal.delete_virtual_link(self._acct, vlink_id2)
+
+        time.sleep(5)
+        ### Verify that VDU and virtual link are successfully deleted
+        rc, rsp = self.cal.get_vdu_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        for vdu in rsp.vdu_info_list:
+            self.assertNotEqual(vdu.vdu_id, vdu_id)
+
+        rc, rsp = self.cal.get_virtual_link_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        for virtual_link in rsp.virtual_link_info_list:
+            self.assertNotEqual(virtual_link.virtual_link_id, vlink_id)
+
+        logger.info("Openstack-CAL-Test: VDU/Virtual Link create-delete test successfully completed")
+
+
class VmData(object):
    """A convenience class that provides all the stats and EPA Attributes
    from the VM provided
    """
    def __init__(self, host, mgmt_ip):
        """
        Args:
            host (str): host name.
            mgmt_ip (str): The IP of the newly created VM.
        """
        # Sleep for 20s to ensure the VM is UP and ready to run commands
        time.sleep(20)
        logger.info("Connecting to host: {} and IP: {}".format(host, mgmt_ip))
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.WarningPolicy())
        self.client.connect(host)
        self.ip = mgmt_ip

        # Get all data from the newly created VM.
        self._data = self._get_data()
        self._page_size = self._exec_and_clean("getconf PAGE_SIZE")
        self._disk_space = self._exec_and_clean(
                "df -kh --output=size /",
                line_no=1)
        self._pci_data = self._exec('lspci -m | grep "10-Gigabit"')

    def _get_data(self,):
        """Runs the command and store the output in a python dict.

        Returns:
            dict: Containing all key => value pairs.
        """
        content = {}
        # lscpu gives CPU topology; /proc/meminfo gives memory stats.
        cmds = ["lscpu", 'less /proc/meminfo']
        for cmd in cmds:
            ssh_out = self._exec(cmd)
            content.update(self._convert_to_dict(ssh_out))
        return content

    def _exec_and_clean(self, cmd, line_no=0):
        """A convenience method to run a command and extract the specified line
        number.

        Args:
            cmd (str): Command to execute
            line_no (int, optional): Default to 0, extracts the first line.

        Returns:
            str: line_no of the output of the command.
        """
        output = self._exec(cmd)[line_no]
        # Collapse runs of whitespace into single spaces.
        output = ' '.join(output.split())
        return output.strip()

    def _exec(self, cmd):
        """Thin wrapper that runs the command and returns the stdout data

        Args:
            cmd (str): Command to execute.

        Returns:
            list: Contains the command output.
        """
        # Commands are run on the VM (self.ip) by hopping through ssh_root.
        _, ssh_out, _ = self.client.exec_command(
                "/usr/rift/bin/ssh_root {} {}".format(self.ip,
                                                      cmd))
        return ssh_out.readlines()

    def _convert_to_dict(self, content):
        """convenience method that cleans and stores the line into dict.
        data is split based on ":" or " ".

        Args:
            content (list): A list containing the stdout.

        Returns:
            dict: containing stat attribute => value.
        """
        flattened = {}
        for line in content:
            line = ' '.join(line.split())
            # Bug fix: blank lines previously raised ValueError on unpack.
            if not line:
                continue
            # Bug fix: split at most once so values containing the separator
            # (e.g. "Model name: Intel(R) Xeon(R)" or multi-word flag lists)
            # no longer raise "too many values to unpack".
            if ":" in line:
                key, value = line.split(":", 1)
            elif " " in line:
                key, value = line.split(" ", 1)
            else:
                # Single token with no separator: nothing to record.
                continue
            flattened[key.strip()] = value.strip()
        return flattened

    @property
    def disk(self):
        # Root filesystem size in whole GB (from `df -kh`).
        disk = self._disk_space.replace("G", "")
        return int(disk)

    @property
    def numa_node_count(self):
        # Number of NUMA nodes reported by lscpu.
        numa_cores = self._data['NUMA node(s)']
        numa_cores = int(numa_cores)
        return numa_cores

    @property
    def vcpus(self):
        # Total logical CPU count reported by lscpu.
        cores = int(self._data['CPU(s)'])
        return cores

    @property
    def cpu_threads(self):
        # Hyper-threads per core reported by lscpu.
        threads = int(self._data['Thread(s) per core'])
        return threads

    @property
    def memory(self):
        # MemTotal is reported in kB; convert to whole GB (decimal).
        memory = self._data['MemTotal']
        memory = int(memory.replace("kB", ""))/1000/1000
        return int(memory)

    @property
    def memory_page_size(self):
        # Output of `getconf PAGE_SIZE` (string, e.g. '4096').
        return self._page_size

    @property
    def pci_passthrough_device_list(self):
        # Raw lspci lines matching "10-Gigabit" devices.
        return self._pci_data
+
+
if __name__ == "__main__":
    # Standalone invocation: run the OpenStack CAL test suite with
    # INFO-level logging.
    logging.basicConfig(level=logging.INFO)
    unittest.main()
diff --git a/rwcal/test/test_rwlxc_rwlaunchpad.py b/rwcal/test/test_rwlxc_rwlaunchpad.py
new file mode 100644
index 0000000..0119232
--- /dev/null
+++ b/rwcal/test/test_rwlxc_rwlaunchpad.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import logging
+import os
+
+import rift.rwcal.cloudsim.lxc as lxc
+import rift.rwcal.cloudsim.lvm as lvm
+
+
+logger = logging.getLogger('rwcal-test')
+
+
def main():
    """Exercise LXC container snapshotting backed by an LVM volume:
    create a master container, snapshot it five times, then tear
    everything down.
    """
    template = os.path.realpath("../rift/cal/lxc-fedora-rift.lxctemplate")
    tarfile = "/net/strange/localdisk/jdowner/lxc.tar.gz"
    volume = 'rift-test'

    # Back the containers with a dedicated LVM volume.
    lvm.create(volume, '/lvm/rift-test.img')
    master = lxc.create_container('test-master', template, volume, tarfile)

    # Take five snapshots of the master container...
    snapshots = [master.snapshot('test-snap-{}'.format(number))
                 for number in range(1, 6)]

    # ...then destroy the snapshots, the master, and the volume.
    for snapshot in snapshots:
        snapshot.destroy()
    master.destroy()
    lvm.destroy(volume)
+
+
+
if __name__ == "__main__":
    # Standalone invocation: verbose logging for the LXC/LVM operations.
    logging.basicConfig(level=logging.DEBUG)
    main()