RIFT OSM R1 Initial Submission

Signed-off-by: Jeremy Mordkoff <jeremy.mordkoff@riftio.com>
diff --git a/rwlaunchpad/CMakeLists.txt b/rwlaunchpad/CMakeLists.txt
new file mode 100644
index 0000000..5a52897
--- /dev/null
+++ b/rwlaunchpad/CMakeLists.txt
@@ -0,0 +1,38 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Author(s): Austin Cormier
+# Creation Date: 5/12/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME rwlaunchpad)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+set(subdirs
+  mock
+  plugins
+  ra
+  test
+  )
+
+##
+# Include the subdirs
+##
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/mock/CMakeLists.txt b/rwlaunchpad/mock/CMakeLists.txt
new file mode 100644
index 0000000..7695cda
--- /dev/null
+++ b/rwlaunchpad/mock/CMakeLists.txt
@@ -0,0 +1,27 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+include(rift_plugin)
+
+set(subdirs
+    plugins
+    )
+
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
diff --git a/rwlaunchpad/mock/README b/rwlaunchpad/mock/README
new file mode 100644
index 0000000..6f66c17
--- /dev/null
+++ b/rwlaunchpad/mock/README
@@ -0,0 +1,44 @@
+
+To test the LP mocklet via the command line:
+
+Part 1:  Run the test server infrastructure
+
+1. ssh into a VM, navigate to your workspace rift root and run ./rift-shell
+2. navigate to:
+    modules/core/mc/rwmc/test
+
+3. run:
+    $ python3 ./mission_control.py -m ethsim -c --skip-prepare-vm --mock --skip-ui
+
+The --skip-ui option prevents the server infrastructure from loading Composer
+and the UI (to save time loading, especially if you are going to be running
+the server in your own dev environment).
+
+Part 2: Run the mocklet
+
+1. repeat step 1 above
+2. navigate to:
+    modules/core/mc/rwlp_dts_mock
+
+
+3. If the rwlp_dts_mock/node_modules directory does not exist, run:
+
+    $ npm install
+
+4. Start the mocklet after the server (mission_control.py) has completed initialization
+
+To start the mocklet:
+
+    $ node lp_mock_client.js
+
+5. After the mocklet has started, open another terminal window (can be
+anywhere that can access the restconf server on your VM) and run the following:
+
+Seed the descriptors and instance config objects, run:
+
+    $ ./set_data.sh <vm-ip-address>
+
+Now you are ready to test retrieving an ns-instance-opdata object
+
+    $ ./get_ns_instance_opdata.sh <vm-ip-address>
+
diff --git a/rwlaunchpad/mock/data/nfvi-metrics.json b/rwlaunchpad/mock/data/nfvi-metrics.json
new file mode 100644
index 0000000..8620b5e
--- /dev/null
+++ b/rwlaunchpad/mock/data/nfvi-metrics.json
@@ -0,0 +1,33 @@
+[
+    {
+        "nfvi_metric": {
+                "vm": {
+                    "active_vm": 1,
+                    "inactive_vm": 1
+                },
+                "memory": {
+                    "used": {
+                        "value": 1
+                    },
+                    "total": {
+                        "value": 2
+                    },
+                    "utilization": {
+                        "value": 1
+                    }
+                },
+                "storage" : {
+                    "used": {
+                        "value": 1
+                    },
+                    "total": {
+                        "value": 2
+                    },
+                    "utilization": {
+                        "value": 1
+                    }
+                }
+        }
+    }
+]
+
diff --git a/rwlaunchpad/mock/data/ns-instance-config.json b/rwlaunchpad/mock/data/ns-instance-config.json
new file mode 100644
index 0000000..29af367
--- /dev/null
+++ b/rwlaunchpad/mock/data/ns-instance-config.json
@@ -0,0 +1,19 @@
+{
+    "nsr": [
+        {
+            "id": "a636c6de-6dd0-11e5-9e8f-6cb3113b406f",
+            "nsd-ref": "a631e8c6-663a-11e5-b122-6cb3113b406f",
+            "admin-status": "ENABLED"
+        },
+        {
+            "id": "c8c6cc24-6dd0-11e5-9e8f-6cb3113b406f",
+            "nsd-ref": "b631e8c6-663a-11e5-b122-6cb3113b406f",
+            "admin-status": "ENABLED"
+        },
+        {
+            "id": "c8c6cf3a-6dd0-11e5-9e8f-6cb3113b406f",
+            "nsd-ref": "c631e8c6-663a-11e5-b122-6cb3113b406f",
+            "admin-status": "DISABLED"
+        }
+    ]
+}
diff --git a/rwlaunchpad/mock/data/nsd_catalog.json b/rwlaunchpad/mock/data/nsd_catalog.json
new file mode 100644
index 0000000..0c6c6ec
--- /dev/null
+++ b/rwlaunchpad/mock/data/nsd_catalog.json
@@ -0,0 +1,44 @@
+{
+    "nsd": [
+        {
+            "id": "a631e8c6-663a-11e5-b122-6cb3113b406f",
+            "name": "Network Service Descriptor 1",
+            "short-name": "NSD1",
+            "vendor": "RIFT.io",
+            "description": "This is a description. It doesn't say much",
+            "version": "0.0.1",
+            "connection-point": [
+                {
+                    "name": "cp-name"
+                }
+            ]
+        },
+        {
+            "id": "b631e8c6-663a-11e5-b122-6cb3113b406f",
+            "name": "Network Service Descriptor 2",
+            "short-name": "NSD2",
+            "vendor": "RIFT.io",
+            "description": "This is a description. It doesn't say much",
+            "version": "0.0.1",
+            "connection-point": [
+                {
+                    "name": "cp-name"
+                }
+            ]
+        },
+        {
+            "id": "c631e8c6-663a-11e5-b122-6cb3113b406f",
+            "name": "Network Service Descriptor 3",
+            "short-name": "NSD3",
+            "vendor": "RIFT.io",
+            "description": "This is a description. It doesn't say much",
+            "version": "0.0.1",
+            "connection-point": [
+                {
+                    "name": "cp-name"
+                }
+            ]
+        }
+    ]
+}
+
diff --git a/rwlaunchpad/mock/data/nsr-templates.json b/rwlaunchpad/mock/data/nsr-templates.json
new file mode 100644
index 0000000..4c512e5
--- /dev/null
+++ b/rwlaunchpad/mock/data/nsr-templates.json
@@ -0,0 +1,57 @@
+[
+    {
+        "create_time": 1445876693,
+        "epa_param": {
+                "ovs_acceleration": {
+                    "vm": 2
+                },
+                "ovs_offload": {
+                    "vm": 2
+                },
+                "ddio": {
+                    "vm": 2
+                },
+                "cat": {
+                    "vm": 2
+                },
+                "cmt": {
+                    "vm": 2
+                }
+        },
+        "monitoring_param": [
+            {
+                "id": "monitoring-param-1",
+                "name": "rate",
+                "description": "Generalized rate monitoring param",
+                "group_tag": "group-a",
+                "min_value": 0,
+                "max_value": 100,
+                "current_value": 0,
+                "widget_type": "GAUGE",
+                "units": "gbps"
+            },
+            {
+                "id": "monitoring-param-2",
+                "name": "size",
+                "description": "Generalized size monitoring param",
+                "group_tag": "group-a",
+                "min_value": 0,
+                "max_value": 100,
+                "current_value": 0,
+                "widget_type": "GAUGE",
+                "units": "gb"
+            },
+            {
+                "id": "monitoring-param-3",
+                "name": "size22",
+                "description": "Generalized size monitoring param",
+                "group_tag": "group-b",
+                "min_value": 0,
+                "max_value": 100,
+                "current_value": 0,
+                "widget_type": "GAUGE",
+                "units": "gb"
+            }
+        ]
+    }
+]
diff --git a/rwlaunchpad/mock/data/ping-pong-ns-instance-config.json b/rwlaunchpad/mock/data/ping-pong-ns-instance-config.json
new file mode 100644
index 0000000..e7d6bb7
--- /dev/null
+++ b/rwlaunchpad/mock/data/ping-pong-ns-instance-config.json
@@ -0,0 +1,10 @@
+{
+    "nsr": [
+        {
+            "id": "f5f41f36-78f6-11e5-b9ba-6cb3113b406f",
+            "nsd-ref": "da1dfbcc-626b-11e5-998d-6cb3113b406f",
+            "admin-status": "ENABLED"
+        }
+    ]
+}
+
diff --git a/rwlaunchpad/mock/data/ping-pong-nsd.json b/rwlaunchpad/mock/data/ping-pong-nsd.json
new file mode 100644
index 0000000..7ad9f6b
--- /dev/null
+++ b/rwlaunchpad/mock/data/ping-pong-nsd.json
@@ -0,0 +1,118 @@
+{
+    "nsd": [
+        {
+            "id": "da1dfbcc-626b-11e5-998d-6cb3113b406f",
+            "name": "ping-pong-nsd",
+            "vendor": "RIFT.io",
+            "description": "Toy NS",
+            "version": "1.0",
+            "connection-point": [
+                {
+                    "name": "ping-pong-nsd/cp0",
+                    "type": "VPORT"
+                },
+                {
+                    "name": "ping-pong-nsd/cp1",
+                    "type": "VPORT"
+                }
+            ],
+            "vld": [
+                {
+                    "id": "ba1c03a8-626b-11e5-998d-6cb3113b406f",
+                    "name": "ping-pong-vld",
+                    "short-name": "ping-pong-vld",
+                    "vendor": "RIFT.io",
+                    "description": "Toy VL",
+                    "version": "1.0",
+                    "type": "ELAN",
+                    "vnfd-connection-point-ref": [
+                        {
+                            "member-vnf-index-ref": 0,
+                            "vnfd-id-ref": "ba145e82-626b-11e5-998d-6cb3113b406f",
+                            "vnfd-connection-point-ref": "ping-pong-vnfd/cp0"
+                        }
+                    ]
+                }
+            ],
+            "constituent-vnfd": [
+                {
+                    "member-vnf-index": 0,
+                    "vnfd-id-ref": "ba145e82-626b-11e5-998d-6cb3113b406f"
+                },
+                {
+                    "member-vnf-index": 1,
+                    "vnfd-id-ref": "ba1947da-626b-11e5-998d-6cb3113b406f"
+                }
+            ],
+            "monitoring-param": [
+                 {
+                    "id": "ping-tx-rate-mp",
+                    "name": "Ping Transmit Rate",
+                    "description": "Ping transmit rate",
+                    "group-tag": "group-1",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 0,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "ping-rc-rate-mp",
+                    "name": "Ping Receive Rate",
+                    "description": "Ping receive rate",
+                    "group-tag": "group-1",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 0,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "ping-packet-size-mp",
+                    "name": "Ping Packet Size",
+                    "description": "Ping packet size",
+                    "group-tag": "group-2",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "pong-tx-rate-mp",
+                    "name": "Pong Transmit Rate 2",
+                    "description": "Pong transmit rate",
+                    "group-tag": "group-2",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 0,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "pong-rc-rate-mp",
+                    "name": "Pong Receive Rate 2",
+                    "description": "Pong receive rate",
+                    "group-tag": "group-2",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 0,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "pong-packet-size-mp",
+                    "name": "Pong Packet Size",
+                    "description": "Pong packet size",
+                    "group-tag": "group-2",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "TEXTBOX",
+                    "units": "mb"
+                }
+            ]
+        }
+    ]
+}
+
diff --git a/rwlaunchpad/mock/data/ping-pong-vnfd.json b/rwlaunchpad/mock/data/ping-pong-vnfd.json
new file mode 100644
index 0000000..c96ee40
--- /dev/null
+++ b/rwlaunchpad/mock/data/ping-pong-vnfd.json
@@ -0,0 +1,396 @@
+{
+    "vnfd": [
+        {
+            "id": "ba145e82-626b-11e5-998d-6cb3113b406f",
+            "name": "ping-vnfd",
+            "short-name": "ping-vnfd",
+            "vendor": "RIFT.io",
+            "description": "This is an example RIFT.ware VNF",
+            "version": "1.0",
+            "internal-vld": [
+                {
+                    "id" : "ba1478fe-626b-11e5-998d-6cb3113b406f",
+                    "name": "fabric",
+                    "short-name": "fabric",
+                    "description": "Virtual link for internal fabric",
+                    "type": "ELAN"
+                }
+            ],
+            "connection-point": [
+                {
+                    "name": "ping-vnfd/cp0",
+                    "type": "VPORT"
+                },
+                {
+                    "name": "ping-vnfd/cp1",
+                    "type": "VPORT"
+                }
+            ],
+            "vdu": [
+                {
+                    "id": "ba14a504-626b-11e5-998d-6cb3113b406f",
+                    "name": "iovdu",
+                    "count": 2,
+                    "vm-flavor": {
+                        "vcpu-count": 4,
+                        "memory-mb": 1638,
+                        "storage-gb": 16
+                    },
+                    "guest-epa": {
+                        "trusted-execution": true,
+                        "mempage-size": "PREFER_LARGE",
+                        "cpu-pinning-policy": "DEDICATED",
+                        "cpu-thread-pinning-policy": "AVOID",
+                        "numa-node-policy": {
+                            "node-cnt": 2,
+                            "mem-policy": "PREFERRED",
+                            "node": [
+                                {
+                                    "id": 1,
+                                    "vcpu": [ 0, 1 ],
+                                    "memory-mb": 8192
+                                }
+                            ]
+                        }
+                    },
+                    "hypervisor-epa": {
+                            "type": "PREFER_KVM"
+                    },
+                    "host-epa": {
+                        "cpu-model": "PREFER_SANDYBRIDGE",
+                        "cpu-arch": "PREFER_X86_64",
+                        "cpu-vendor": "PREFER_INTEL",
+                        "cpu-socket-count": "PREFER_TWO",
+                        "cpu-feature": [ "PREFER_AES", "PREFER_CAT" ]
+                    },
+                    "image": "rw_openstack.qcow2",
+                    "internal-connection-point": [
+                        {
+                            "id": "ba153744-626b-11e5-998d-6cb3113b406f",
+                            "type": "VPORT"
+                        },
+                        {
+                            "id": "ba15577e-626b-11e5-998d-6cb3113b406f",
+                            "type": "VPORT"
+                        }
+                    ],
+                    "internal-interface": [
+                        {
+                            "name": "eth0",
+                            "vdu-internal-connection-point-ref": "ba153744-626b-11e5-998d-6cb3113b406f",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        },
+                        {
+                            "name": "eth1",
+                            "vdu-internal-connection-point-ref": "ba15577e-626b-11e5-998d-6cb3113b406f",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        }
+                    ],
+                    "external-interface": [
+                        {
+                            "name": "eth0",
+                            "vnfd-connection-point-ref": "ping-vnfd/cp0",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        },
+                        {
+                            "name": "eth1",
+                            "vnfd-connection-point-ref": "ping-vnfd/cp1",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        }
+                    ]
+                }
+            ],
+            "monitoring-param": [
+                {
+                    "id": "ping-tx-rate-mp",
+                    "name": "Ping Transmit Rate",
+                    "description": "Ping transmit rate",
+                    "group-tag": "group-1",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "ping-rc-rate-mp",
+                    "name": "Ping Receive Rate",
+                    "description": "Ping receive rate",
+                    "group-tag": "group-1",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "ping-packet-size-mp",
+                    "name": "Ping Packet Size",
+                    "description": "Ping packet size",
+                    "group-tag": "group-2",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                }
+            ],
+            "rw-vnfd:control-param": [
+                {
+                    "id": "ping-transmit-rate-cp1",
+                    "name": "Transmit Rate",
+                    "description": "Ping transmit rate",
+                    "group-tag": "group-3",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "step-value": 1,
+                    "units": "gbps",
+                    "widget-type": "GAUGE",
+                    "url": "https://%s/api/operations/set-control-param",
+                    "operation": "POST",
+                    "payload": "{\"set-control-param\":{\"id\":\"%s\",\"obj-code\":\"VNFR\",\"control-id\":\"ping-transmit-rate-cp1\",\"value\":10} }"
+                },
+                {
+                    "id": "ping-packet-size-cp1",
+                    "name": "Ping Packet Size",
+                    "description": "Packet size",
+                    "group-tag": "group-4",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "step-value": 1,
+                    "units": "gbps",
+                    "widget-type": "GAUGE",
+                    "url": "https://%s/api/operations/set-control-param",
+                    "operation": "POST",
+                    "payload": "{\"set-control-param\":{\"id\":\"%s\",\"obj-code\":\"VNFR\",\"control-id\":\"ping-packet-size-cp1\",\"value\":10 } }"
+                }
+            ],
+            "rw-vnfd:action-param" : [
+                {
+                    "id": "start-vnfr",
+                    "name": "Start PING",
+                    "description": "Start the PING VNFR",
+                    "group-tag": "start-vnfr",
+                    "url": "https://%s/api/operations/start-vnfr",
+                    "operation": "POST",
+                    "payload": "{\"start-vnfr\": { \"id\": \"%s\" }  }"
+                },
+                {
+                    "id": "stop-vnfr",
+                    "name": "Stop PING",
+                    "description": "Stop the PING VNFR",
+                    "group-tag": "stop-vnfr",
+                    "url": "https://%s/api/operations/stop-vnfr",
+                    "operation": "POST",
+                    "payload": "{\"stop-vnfr\": { \"id\": \"%s\" }  }"
+                }
+            ]
+        },
+        {
+            "id": "ba1947da-626b-11e5-998d-6cb3113b406f",
+            "name": "pong-vnfd",
+            "short-name": "pong-vnfd",
+            "vendor": "RIFT.io",
+            "description": "This is an example RIFT.ware VNF",
+            "version": "1.0",
+            "internal-vld": [
+                {
+                    "id" : "ba1478fe-626b-11e5-998d-6cb3113b406f",
+                    "name": "fabric",
+                    "short-name": "fabric",
+                    "description": "Virtual link for internal fabric",
+                    "type": "ELAN"
+                }
+            ],
+            "connection-point": [
+                {
+                    "name": "pong-vnfd/cp0",
+                    "type": "VPORT"
+                },
+                {
+                    "name": "pong-vnfd/cp1",
+                    "type": "VPORT"
+                }
+            ],
+            "vdu": [
+                {
+                    "id": "ba14a504-626b-11e5-998d-6cb3113b406f",
+                    "name": "iovdu",
+                    "count": 2,
+                    "vm-flavor": {
+                        "vcpu-count": 4,
+                        "memory-mb": 1638,
+                        "storage-gb": 16
+                    },
+                    "guest-epa": {
+                        "trusted-execution": true,
+                        "mempage-size": "PREFER_LARGE",
+                        "cpu-pinning-policy": "DEDICATED",
+                        "cpu-thread-pinning-policy": "AVOID",
+                        "numa-node-policy": {
+                            "node-cnt": 2,
+                            "mem-policy": "PREFERRED",
+                            "node": [
+                                {
+                                    "id": 1,
+                                    "vcpu": [ 0, 1 ],
+                                    "memory-mb": 8192
+                                }
+                            ]
+                        }
+                    },
+                    "hypervisor-epa": {
+                            "type": "PREFER_KVM"
+                    },
+                    "host-epa": {
+                        "cpu-model": "PREFER_SANDYBRIDGE",
+                        "cpu-arch": "PREFER_X86_64",
+                        "cpu-vendor": "PREFER_INTEL",
+                        "cpu-socket-count": "PREFER_TWO",
+                        "cpu-feature": [ "PREFER_AES", "PREFER_CAT" ]
+                    },
+                    "image": "rw_openstack.qcow2",
+                    "internal-connection-point": [
+                        {
+                            "id": "ba153744-626b-11e5-998d-6cb3113b406f",
+                            "type": "VPORT"
+                        },
+                        {
+                            "id": "ba15577e-626b-11e5-998d-6cb3113b406f",
+                            "type": "VPORT"
+                        }
+                    ],
+                    "internal-interface": [
+                        {
+                            "name": "eth0",
+                            "vdu-internal-connection-point-ref": "ba153744-626b-11e5-998d-6cb3113b406f",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        },
+                        {
+                            "name": "eth1",
+                            "vdu-internal-connection-point-ref": "ba15577e-626b-11e5-998d-6cb3113b406f",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        }
+                    ],
+                    "external-interface": [
+                        {
+                            "name": "eth0",
+                            "vnfd-connection-point-ref": "pong-vnfd/cp0",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        },
+                        {
+                            "name": "eth1",
+                            "vnfd-connection-point-ref": "pong-vnfd/cp1",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        }
+                    ]
+                }
+            ],
+            "monitoring-param": [
+                {
+                    "id": "pong-tx-rate-mp",
+                    "name": "Pong Transmit Rate",
+                    "description": "Pong transmit rate",
+                    "group-tag": "group-1",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "pong-rx-rate-mp",
+                    "name": "Pong Receive Rate",
+                    "description": "Pong receive rate",
+                    "group-tag": "group-1",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "pong-packet-size-mp",
+                    "name": "Pong Packet Size",
+                    "description": "Pong packet size",
+                    "group-tag": "group-2",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "TEXTBOX",
+                    "units": "mb"
+                }
+            ],
+            "rw-vnfd:control-param" : [
+                {
+                    "id": "pong-receive-rate-cp1",
+                    "name": "Pong Receive Rate",
+                    "description": "Pong receive rate",
+                    "group-tag": "group-3",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 0,
+                    "step-value": 1,
+                    "units": "gbps",
+                    "widget-type": "GAUGE",
+                    "url": "https://{host}/api/operations/vnfr-control-param/",
+                    "operation": "POST",
+                    "payload": "{\"set-control-param\":{\"id\":\"%s\",\"obj-code\":\"VNFR\",\"control-id\":\"pong-receive-rate-cp1\",\"value\":10} }"
+                },
+                {
+                    "id": "pong-packet-size-cp1",
+                    "name": "Pong Packet Size",
+                    "description": "Packet size",
+                    "group-tag": "group-4",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 0,
+                    "step-value": 1,
+                    "units": "gbps",
+                    "widget-type": "GAUGE",
+                    "url": "https://%s/api/operations/set-control-param",
+                    "operation": "POST",
+                    "payload": "{\"set-control-param\":{\"id\":\"%s\",\"obj-code\":\"VNFR\",\"control-id\":\"pong-packet-size-cp1\",\"value\":10 } }"
+                }
+            ],
+            "rw-vnfd:action-param" : [
+                {
+                    "id": "start-vnfr",
+                    "name": "Start PONG",
+                    "description": "Start the PONG VNFR",
+                    "group-tag": "start-vnfr",
+                    "url": "https://%s/api/operations/start-vnfr",
+                    "operation": "POST",
+                    "payload": "{\"start-vnfr\": { \"id\": \"%s\" }  }"
+                },
+                {
+                    "id": "stop-vnfr",
+                    "name": "Stop PONG",
+                    "description": "Stop the PONG VNFR",
+                    "group-tag": "stop-vnfr",
+                    "url": "https://%s/api/operations/stop-vnfr",
+                    "operation": "POST",
+                    "payload": "{\"stop-vnfr\": { \"id\": \"%s\" }  }"
+                }
+            ]
+       }
+   ]
+}
diff --git a/rwlaunchpad/mock/data/simmp-rules.json b/rwlaunchpad/mock/data/simmp-rules.json
new file mode 100644
index 0000000..d92f835
--- /dev/null
+++ b/rwlaunchpad/mock/data/simmp-rules.json
@@ -0,0 +1,11 @@
+{
+    "description": "Rules for Simulating monitoring params",
+    "mp-mapper": {
+        "ping-tx-rate-mp": "tx_rc_rate",
+        "ping-rc-rate-mp": "tx_rc_rate",
+        "pong-tx-rate-mp": "tx_rc_rate",
+        "pong-rx-rate-mp": "tx_rc_rate",
+        "ping-packet-size-mp": "packet_size",
+        "pong-packet-size-mp": "packet_size"
+    }
+}
diff --git a/rwlaunchpad/mock/data/vld_catalog.json b/rwlaunchpad/mock/data/vld_catalog.json
new file mode 100644
index 0000000..0de0e29
--- /dev/null
+++ b/rwlaunchpad/mock/data/vld_catalog.json
@@ -0,0 +1,16 @@
+{
+    "vld": [
+        {
+            "id": "a631e8c6-663a-11e5-b122-6cb3113b406f",
+            "name": "vld-one"
+        },
+        {
+            "id": "b631e8c6-663a-11e5-b122-6cb3113b406f",
+            "name": "vld-two"
+        },
+        {
+            "id": "c631e8c6-663a-11e5-b122-6cb3113b406f",
+            "name": "vld-three"
+        }
+    ]
+}
diff --git a/rwlaunchpad/mock/data/vnfd_catalog.json b/rwlaunchpad/mock/data/vnfd_catalog.json
new file mode 100644
index 0000000..1951980
--- /dev/null
+++ b/rwlaunchpad/mock/data/vnfd_catalog.json
@@ -0,0 +1,47 @@
+{
+    "vnfd": [
+        {
+            "id": "a200a0a0-663a-11e5-b122-6cb3113b406f",
+            "name": "Virtual Network Descriptor 1",
+            "short-name": "VNFD1",
+            "vendor": "RIFT.io",
+            "description": "This is a description. It doesn't say much",
+            "version": "0.0.1",
+            "internal-vld": [
+                    {
+                        "id" : "68981800-7201-11e5-9fc4-bf5ad0442ce5",
+                        "name": "Zathrus",
+                        "short-name": "zathrus",
+                        "description": "Virtual link for zathrus",
+                        "type": "ELAN",
+                        "root-bandwidth": 42,
+                        "leaf-bandwidth": 42,
+                        "internal-connection-point-ref": [
+
+                            ]
+                    }
+              ]
+        },
+        {
+            "id": "b200a0a0-663a-11e5-b122-6cb3113b406f",
+            "name": "vnfd-two",
+            "short-name": "VNFD2",
+            "vendor": "RIFT.io",
+             "description": "This is a description. It doesn't say much",
+            "version": "0.0.1",
+            "internal-vld": [
+
+            ]
+        },
+        {
+            "id": "c200a0a0-663a-11e5-b122-6cb3113b406f",
+            "name": "vnfd-three",
+            "short-name": "VNFD03",
+            "vendor": "RIFT.io",
+            "description": "This is a description. It doesn't say much",
+            "version": "0.0.1",
+            "internal-vld": [
+            ]
+        }
+    ]
+}
diff --git a/rwlaunchpad/mock/data/vnfr-templates.json b/rwlaunchpad/mock/data/vnfr-templates.json
new file mode 100644
index 0000000..a93dafb
--- /dev/null
+++ b/rwlaunchpad/mock/data/vnfr-templates.json
@@ -0,0 +1,54 @@
+[
+    {
+        "action_param": [
+            {
+                "id": "actionparam01",
+                "name": "Start Me Up",
+                "description": "This is a description. It doesn't say much",
+                "group_tag": "control-group1",
+                "url": "http://localhost:8091/vnfr/1/start"
+            },
+            {
+                "id": "actionparam02",
+                "name": "Stop me",
+                "description": "This is a description. It doesn't say much",
+                "group_tag": "control-group1",
+                "url": "http://localhost:8091/vnfr/1/stop",
+                "operation": "POST"
+            }
+        ],
+       "control_param": [
+            {
+                "id": "controlparam01",
+                "name": "Control Param 1",
+                "description": "This is a description. It doesn't say much",
+                "group_tag": "control-group1",
+                "min_value": 0,
+                "max_value": 100,
+                "current_value": 0,
+                "step_value": 1,
+                "units": "smoots",
+                "widget_type": "HISTOGRAM",
+                "url": "http://localhost:8091/vnfr/1/control-1",
+                "operation": "POST",
+                "payload": "{ \"test\": \"sample value\" }"
+            },
+            {
+                "id": "controlparam02",
+                "name": "Control Param 2",
+                "description": "This is a description. It doesn't say much",
+                "group_tag": "control-group1",
+                "min_value": 0,
+                "max_value": 100,
+                "current_value": 0,
+                "step_value": 1,
+                "units": "smoots",
+                "widget_type": "GAUGE",
+                "url": "http://localhost:8091/vnfr/1/control-2",
+                "operation": "POST",
+                "payload": "{ \"test\": \"sample value\" }"
+            }
+        ]
+    }
+]
+
diff --git a/rwlaunchpad/mock/data_model.js b/rwlaunchpad/mock/data_model.js
new file mode 100644
index 0000000..ef56c68
--- /dev/null
+++ b/rwlaunchpad/mock/data_model.js
@@ -0,0 +1,569 @@
+/*
+ *  This module provides the data model layer for the Launchpad Mocklet
+ */
+
+var util = require('util');
+var uuid = require('node-uuid');
+var _ = require('lodash');
+
+// Our modules
+var simmp_module = require('./simmp.js');
+
+// Data packages
+// TODO: Make these parameters to pass to the data model
+// instead of hardcoding them as requires here
+var simmp_rules = require('./data/simmp-rules.json');
+var nsr_templates = require('./data/nsr-templates.json');
+var vnfr_templates = require('./data/vnfr-templates.json');
+
+/*
+ * Generic  to throw on data model exceptions
+ */
+function DataModelException(message) {
+    this.message = message;
+    this.name = "DataModelException";
+}
+
+/*
+ * Exception thrown when a code path has not been implemented yet.
+ * This function is temporary until all needed features are implemented in this mocklet
+ */
+function NotImplementedException(message) {
+    this.message = "You have fallen off the edge of the world: "+message;
+    this.name = 'NotImplementedException';
+}
+
+
+/*
+ * Class to handle simulating events over time for monitoring params
+ */
+MonitoringParam = function(values, time_function) {
+    this.values = values;
+    this.timeFunc = time_function;
+}
+
+MonitoringParam.prototype.timeStep = function(elapsed_seconds) {
+    this.values.current_value = this.timeFunc(this.values.current_value,
+            elapsed_seconds);
+    return this.values.current_value;
+};
+
+/*
+ * DataModel constructor
+ *
+ * Arguments
+ *   restconf_host - Host name and port. eg: 'localhost:8008'
+ */
+DataModel = function (restconf_host) {
+    this.restconf_host = restconf_host ? restconf_host : "localhost:8008";
+
+    this.simmp = new simmp_module.SimMp(simmp_rules);
+    if (!this.simmp) {
+        throw "simmp failed to initialize";
+    }
+    // Time data for event simulation (monitoring params)
+    this.start_time = Date.now();
+    this.previous_time =this.start_time;
+
+    // Store descriptors
+    this.descriptors = { nsd: {}, vnfd: {}, vld: {} };
+
+    // Store instance config data. Currently only NS Yang implements config data
+    this.config_records = { nsr: {}, vnfr: {}, vlr: {} };
+
+    // Stores Virtual Network Function instance records
+    this.vnfr_records = { };
+
+    // Stores Network Service instance operational records
+    this.ns_opdata_records = { };
+
+    // Manage which mock data to use next
+    this.vnfr_template_index = 0;
+    this.nsr_template_index = 0;
+
+    // Operational (running) state for opdata records
+    // 'on', 'off'
+    // TBD: do restarting
+    this.opstate = { nsr: {}, vnfr: {} };
+
+    // Store MonitoringParam objects
+    this.monitoring_params = {nsr: {}, vnfr: {} };
+}
+
+
+/*
+ * creates a descriptor name from the record name
+ */
+DataModel.prototype.rec2desc = function (record_type) {
+    if (record_type.charAt(record_type.length-1) == 'r') {
+        return record_type.slice(0, -1)+'d';
+    } else if (["ns","vnf","vl"].indexOf(record_type) != -1) {
+        return record_type + 'd';
+    } else {
+        throw new DataModelException('"%s" is not a supported record type', record_type);
+    }
+};
+
+DataModel.prototype.setDescriptor = function(descriptor_type, descriptor) {
+        if (!this.descriptors.hasOwnProperty(descriptor_type)) {
+            throw new DataModelException('"%s" is not a supported descriptor type', descriptor_type);
+        }
+
+        this.descriptors[descriptor_type][descriptor.id] = descriptor;
+};
+
+DataModel.prototype.setConfigRecord = function(record_type, record) {
+         if (!this.config_records.hasOwnProperty(record_type)) {
+            throw new DataModelException('"%s" is not a supported record type', record_type);
+        }
+
+        this.config_records[record_type][record.id] = record;
+};
+
+DataModel.prototype.findConfigRecord = function(record_type, record_id) {
+        if (this.config_records.hasOwnProperty(record_type)) {
+            return this.config_records[record_type][record_id];
+        } else {
+            return null;
+        }
+};
+
+/*
+ *
+ */
+DataModel.prototype.updateControlParam = function(record_type, record_id,
+        control_id, value) {
+    if (record_type == 'vnfr') {
+        var record = this.vnfr_records[record_id];
+    } else {
+        var record = this.ns_opdata_records[record_id];
+    }
+    // find the control param
+    if ('control_param' in record) {
+        for (var i=0; i < record.control_param.length; i++) {
+            if (control_id == record.control_param[i].id) {
+                // Make sure value is within min and max values
+                if (value >= record.control_param[i].min_value &&
+                    value <= record.control_param[i].max_value) {
+
+                    record.control_param[i].current_value = value;
+                    return 'SUCCESS';
+                } else {
+                    var errmsg = 'value "'+value+'" out of range. '+
+                        'Needs to be within '+ record.control_param[i].min_value +
+                        ' and ' + record.control_param[i].max_value;
+                    throw new DataModelException(errmsg);
+                }
+            }
+        }
+    } else {
+        var errmsg = 'Record type "' + record_type + '" with id "'+
+            record_id + '" does not have any control params';
+        throw new DataModelException(errmsg);
+    }
+};
+
+/*
+ * NS functions
+ *
+ * General comments on NS instance config/opdata:
+ *  For each ns-instance-config, the descriptor needs to be added first
+ */
+
+// TODO: Consolidate the template handling functions
+DataModel.prototype.nextNsrTemplate = function() {
+    var nsr_template = _.clone(nsr_templates[this.nsr_template_index], true);
+    this.nsr_template_index += 1;
+    if (this.nsr_template_index >= nsr_templates.length) {
+        this.nsr_template_index = 0;
+    }
+    return nsr_template;
+};
+
+DataModel.prototype.getNsdConnectionPoints = function(nsd_id) {
+    var nsd =  this.descriptors['nsd'][nsd_id];
+    if (!nsd) {
+        throw new DataModelException("NSD ID '%s' does not exist", nsd_id);
+    }
+    // console.log("\n\nnsd = %s", JSON.stringify(nsd));
+    return nsd['connection_point'];
+};
+
+
+DataModel.prototype.createNsrControlParams = function(ns_instance_config_id) {
+    // TODO: find all VNFDs associated with this NS instance
+    // then either call this.createVnfrControlParams if you want to talk
+    // VNFR specific control params or we can generalize 'createVnfrControlParams'
+    // to pass in 'record_id' instead of vnfr_id.
+    //
+    var control_params = [];
+
+    return control_params;
+};
+
+/*
+ * Sets an ns-instance-config object record and creates an
+ * ns-instance-opdata record.
+ *
+ * If the NS instance opdata record matching the id of the ns-instance-config
+ * already exists, then remove the ns-instance-opdate record and reconstruct.
+ */
+DataModel.prototype.setNsInstanceConfig = function(ns_instance_config) {
+    // we are updating an existing ns-instance record set
+    // There is an issue that subsequent 'PUT' actions do not transfer
+    // the whole data to the mocklet. So we need to retrieve the existing
+    // ns-instance-config to get the nsd-ref
+
+    // TODO: Consider creating a 'set_or_update' method for ns-instance-config
+    var ns_config = this.findConfigRecord('nsr', ns_instance_config.id);
+    if (ns_config) {
+        ns_config.admin_status = ns_instance_config.admin_status;
+    } else {
+        this.setConfigRecord('nsr', ns_instance_config);
+        ns_config = ns_instance_config;
+    }
+    if (ns_config.id in this.ns_opdata_records) {
+        delete this.ns_opdata_records[ns_config.id];
+    }
+    // if ns-instance-config is 'ENABLED', then create an ns-instance-opdata
+    if (ns_config.admin_status == 'ENABLED') {
+        ns_opdata = this.generateNsInstanceOpdata(ns_config);
+        // set the ns instance opdata. Doesn't matter if it already exists
+        this.ns_opdata_records[ns_opdata.ns_instance_config_ref] = ns_opdata;
+    }
+};
+
+DataModel.prototype.generateMonitoringParams = function(descriptor_type, descriptor_id) {
+    console.log('Called generateMonitoringParams');
+    if (!(descriptor_type in this.descriptors)) {
+        throw new DataModelException('descriptor type "' + descriptor_type + '" not found');
+    }
+    var descriptor = this.descriptors[descriptor_type][descriptor_id];
+    var a_simmp = this.simmp;
+    if (descriptor) {
+        if ('monitoring_param' in descriptor) {
+            return descriptor['monitoring_param'].map(function(obj) {
+                var simFunc = a_simmp.createSimMonitorFunc(obj);
+                return new MonitoringParam(_.clone(obj, true), simFunc);
+            });
+        } else {
+            console.log('Descriptor(type=%s) with (id=%s) does not have ' +
+               'monitoring params', descriptor_type, descriptor_id);
+            return [];
+        }
+    } else {
+        throw new DataModelException("Cannot find descriptor %s with id '%s'",
+                descriptor_type, descriptor_id);
+    }
+};
+
+DataModel.prototype.updateMonitoringParams = function(instance_type, instance_id) {
+    var sim_mp = this.monitoring_params[instance_type][instance_id];
+    if (sim_mp) {
+        var time_now = Date.now();
+        var elapsed_seconds = (time_now - this.previous_time) / 1000;
+        var monitoring_params = sim_mp.map(function(obj) {
+            obj.timeStep(elapsed_seconds);
+            return obj.values;
+        });
+        this.previous_time = time_now;
+        return monitoring_params;
+    } else {
+        // TODO: Figure out how we want to handle this case
+        return [];
+    }
+};
+
+/*
+ * Creates an ns-instance-opdata object, but does not add it to the data
+ * store.
+ */
+DataModel.prototype.generateNsInstanceOpdata = function (ns_config) {
+    var nsr_template = this.nextNsrTemplate();
+
+    // HACK: We need to get control and action param from the nsr
+    // or have a function that synchronizes the next array element in
+    // the templates
+    var vnfr_template = this.nextVnfrTemplate();
+
+    var nsd_id = ns_config.nsd_ref;
+    var connection_points = this.getNsdConnectionPoints(ns_config.nsd_ref);
+    var sim_mp = this.generateMonitoringParams('nsd', nsd_id);
+    // save for using in update
+    this.monitoring_params['nsr'][ns_config.id] = sim_mp;
+    var monitoring_params = sim_mp.map(function(obj) {
+        // not time stepping when we create them
+        return obj.values;
+    });
+
+    return {
+        ns_instance_config_ref: ns_config.id,
+        'connection_point' : _.clone(connection_points, true),
+        epa_param: _.clone(nsr_template['epa_param'], true),
+        // NOTE: Remarked out until nfvi metrics figured out
+        //nfvi_metric: _.clone(nsr_template['nfvi_metric'], true),
+        monitoring_param: monitoring_params,
+        //monitoring_param: _.clone(nsr_template['monitoring_param'], true),
+        create_time: nsr_template['create_time'],
+        action_param: vnfr_template['action_param'],
+        // TODO: control_param: this.createNsrControlParams(ns_config.id);
+        control_param: vnfr_template['control_param']
+    };
+};
+
+DataModel.prototype.getNsInstanceOpdata = function() {
+    var opdata_records = [];
+    var config_records = this.config_records['nsr'];
+    for (config_record_id in config_records) {
+        if (config_records[config_record_id]['admin_status'] == 'ENABLED') {
+            console.log('Is ENABLED: ns-instance-config record with id %s', config_record_id);
+
+            ns_op_rec = this.ns_opdata_records[config_record_id];
+            if (ns_op_rec) {
+                // TODO: update monitoring params
+                ns_op_rec.monitoring_param = this.updateMonitoringParams(
+                        'nsr', config_record_id);
+                opdata_records.push(ns_op_rec);
+            } else {
+                console.log('NO RECORD FOUND for ns config id: %s', config_record_id);
+            }
+        } else {
+            console.log('Either no admin status record or not enabled');
+        }
+    }
+    return opdata_records;
+};
+
+
+/* =============
+ * VNF functions
+ * =============
+ */
+
+/*
+ * Gets the next VNFR template from the array of VNFR templates and 
+ * increments the VNFR template counter. Wraps back to the first VNFR
+ * template when the last one is used.
+ */
+DataModel.prototype.nextVnfrTemplate = function() {
+    var vnfr_template = _.clone(vnfr_templates[this.vnfr_template_index], true);
+    this.vnfr_template_index += 1;
+    if (this.vnfr_template_index >= vnfr_templates.length) {
+        this.vnfr_template_index = 0;
+    }
+    return vnfr_template;
+}
+
+/*
+ * Arguments
+ *  vnfd - VNF Descriptor object
+ *  vnfr_id - VNFR unique identifier
+ *  host  - host name and port
+ */
+DataModel.prototype.createVnfrActionParams = function(vnfd, vnfr_id) {
+    // Canned start, stop for now
+    // TBD: read action params from VNFD and create here
+    // Use
+    var action_param = [
+        {
+            id: uuid.v1(),
+            name: "Start Me",
+            description: "Start this VNFR",
+            group_tag: "start-vnfr",
+            url: "https://"+this.restconf_host+"/api/operations/start-vnfr",
+            operation: "POST",
+            payload: '{"start-vnfr": { "id": "'+vnfr_id+'"}}'
+        },
+        {
+            id: uuid.v1(),
+            name: "Stop Me",
+            description: "Stop this VNFR",
+            group_tag: "stop-vnfr",
+            url: "https://"+this.restconf_host+"/api/operations/stop-vnfr",
+            operation: "POST",
+            payload: '{"stop-vnfr": { "id": "'+vnfr_id+'"}}'
+        }
+    ];
+    return action_param;
+};
+
+DataModel.prototype.createVnfrControlParams = function(vnfd, vnfr_id,
+        vnfr_template) {
+    console.log("Called Datamodel.prototype.createVnfrControlParams");
+    if (vnfr_template) {
+        console.log("returning clone of vnfr_template['control_param']");
+        return _.clone(vnfr_template['control_param'], true);
+    } else {
+        if (vnfd.control_param) {
+            console.log("VNFD's control-param="+JSON.stringify(vnfd.control_param));
+            var a_restconf_host = this.restconf_host;
+            var cp_arry = _.clone(vnfd.control_param, true);
+            var control_params = vnfd.control_param.map(function(obj) {
+                var cp = _.clone(obj, true);
+                cp.url = util.format(cp.url, a_restconf_host);
+                console.log("\ncontrol-param payload before:"+ cp.payload);
+                cp.payload = util.format(cp.payload, vnfr_id);
+                console.log("\ncontrol-param payload after:"+ cp.payload+"\n");
+                return cp;
+            });
+            return control_params;
+        } else {
+            return [];
+        }
+        throw new NotImplementedException("createVnfrControlParam: non-template");
+    }
+}
+
+/*
+ * Creates a new VNFR base on the VNFD in the argument.
+ * This method is intended to not have side effects, otherwise
+ * just put this code in this.addVnfData
+ */
+DataModel.prototype.createVnfr = function(vnfd) {
+    //var vnfr_template = this.nextVnfrTemplate();
+    var vnfr_id = uuid.v1();
+
+    return {
+        id: vnfr_id,
+        // Hack: Copy the VNFD values but append '-Record' to end
+        name: vnfd.name + ' Record',
+        short_name: vnfd.short_name + '_REC',
+        vendor: vnfd.vendor,
+        description: vnfd.description,
+        version: vnfd.version,
+        vnfd_ref: vnfd.id,
+        internal_vlr: [],
+        // Even though this is in the Yang, it doesn't exist in the
+        // instantiated model:
+        // 'internal_connection_point_ref': [],
+        action_param: this.createVnfrActionParams(vnfd, vnfr_id),
+        //control_param: _.clone(vnfr_template['control_param'], true)
+        control_param: this.createVnfrControlParams(vnfd, vnfr_id)
+    };
+};
+
+
+/*
+ * Creates and adds a new VNFD and matching VNFR record to our data store
+ *
+ * TODO: Might need to be updated so we create a VNFR when a start VNFR is called
+ *
+ */
+DataModel.prototype.addVnfData = function(vnfd) {
+    // if the vnfd does not already exist:
+    if (this.descriptors['vnfd'][vnfd.id] == null) {
+        console.log("adding new vnfd with id %s", vnfd.id);
+        this.setDescriptor('vnfd', vnfd);
+        // create a vnfr record, but without monitoring-param
+        var vnfr = this.createVnfr(vnfd);
+
+        var sim_mp = this.generateMonitoringParams('vnfd', vnfd.id);
+        // save for using in update
+        this.monitoring_params['vnfr'][vnfr.id] = sim_mp;
+        vnfr.monitoring_param = sim_mp.map(function(obj) {
+            // not time stepping when we create them
+            return obj.values;
+        });
+        this.vnfr_records[vnfr.id] = vnfr;
+    } else {
+        // do nothing
+    }
+};
+
+
+DataModel.prototype.getVnfrs = function () {
+    records = [];
+    for (vnfr_id in this.vnfr_records) {
+        // When admin-status is implemented, then return only those 'ENABLED'
+        var vnfr_record = this.vnfr_records[vnfr_id];
+        vnfr_record.monitoring_param = this.updateMonitoringParams(
+                'vnfr', vnfr_id);
+        records.push(vnfr_record);
+    }
+    return records;
+}
+
+
+// Move the following to a new VnfrManager class
+
+DataModel.prototype.startVnfr = function(vnfr_id) {
+    console.log('Calling DataModel.startVnfr with id "%s"', vnfr_id);
+
+    console.log('Here are the VNFR ids we have:');
+    for (key in this.vnfr_records) {
+        console.log('id:%s"', key);
+    }
+    //console.log('vnfr_records = %s', JSON.stringify(this.vnfr_records));
+
+    if (!(vnfr_id in this.vnfr_records)) {
+        var errmsg = 'Cannot find vnfr record with id "'+vnfr_id+'"';
+        console.error('\n\n'+errmsg+'\n\n');
+        throw new DataModelException(errmsg);
+    }
+    // Just add/set it
+    this.opstate.vnfr[vnfr_id] = 'ON';
+    return this.vnfr_records[vnfr_id];
+}
+
+DataModel.prototype.stopVnfr = function(vnfr_id) {
+    console.log('Calling DataModel.stopVnfr with id "%s"', vnfr_id);
+    if (!(vnfr_id in this.vnfr_records)) {
+        var errmsg = 'Cannot find vnfr record with id "'+vnfr_id+'"';
+        console.error(errmsg);
+        throw new DataModelException(errmsg);
+    }
+    // Just add/set it
+    this.opstate.vnfr[vnfr_id] = 'OFF';
+    return this.vnfr_records[vnfr_id];
+}
+
+DataModel.prototype.vnfrRunningState = function(vnfr_id) {
+    if (!(vnfr_id in this.vnfr_records)) {
+        throw new DataModelException(
+                'DataModel.stopVnfr: Cannot find VNFR with id "%s"', vnfr_id);
+    }
+    if (vnfr_id in this.opstate.vnfr) {
+        return this.opstate.vnfr[vnfr_id];
+    } else {
+        // Assume we are 'ON'
+        return 'ON';
+    }
+}
+
+
+/* ==========================
+ * Debug and helper functions
+ * ==========================
+ */
+
+DataModel.prototype.prettyPrint = function (out) {
+    if (out == undefined) {
+        out = console.log;
+    }
+    out('Descriptors:');
+    for (descriptor_type in this.descriptors) {
+        out("Descriptor type: %s", descriptor_type);
+        for (descriptor_id in this.descriptors[descriptor_type]) {
+            out("data=%s",descriptor_id,
+                    JSON.stringify(this.descriptors[descriptor_type][descriptor_id]));
+        };
+    };
+
+    out('\nConfigRecords:');
+    for (record_type in this.config_records) {
+        out("Record type: %s", record_type);
+        for (record_id in this.config_records[record_type]) {
+            out("data=%s", record_id,
+                    JSON.stringify(this.config_records[record_type][record_id]));
+        };
+    };
+};
+
+
+module.exports = {
+    DataModelException: DataModelException,
+    NotImplementedException: NotImplementedException,
+    MonitoringParam: MonitoringParam,
+    DataModel: DataModel
+};
+
diff --git a/rwlaunchpad/mock/get_data.sh b/rwlaunchpad/mock/get_data.sh
new file mode 100755
index 0000000..508275c
--- /dev/null
+++ b/rwlaunchpad/mock/get_data.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+#
+# This is a convenience script to get descriptors from the RESTConf server
+#
+# Provide the RESTConf hostname as the argument or default to localhost
+
+if [ $# -eq 0 ] ; then
+    HOST=localhost
+else
+    HOST=$1
+fi
+
+echo "Getting descriptor data from $HOST"
+
+for descriptor in nsd vnfd vld
+do
+
+    printf "retrieving $descriptor:\n\n"
+
+    curl --user admin:admin \
+        -H "Content-Type: application/vnd.yang.data+json" \
+        -H "accept: application/vnd.yang.data+json" \
+        http://$HOST:8008/api/running/$descriptor-catalog/
+
+done
+
+rectype='ns'
+
+    curl --user admin:admin \
+        -H "Content-Type: application/vnd.yang.data+json" \
+        -H "accept: application/vnd.yang.data+json" \
+        http://$HOST:8008/api/running/$rectype-instance-config/
+
+
+    curl --user admin:admin \
+        -H "Content-Type: application/vnd.yang.data+json" \
+        -H "accept: application/vnd.yang.data+json" \
+        http://$HOST:8008/api/operational/$rectype-instance-opdata/
+
+
+
diff --git a/rwlaunchpad/mock/get_ns_instance_opdata.sh b/rwlaunchpad/mock/get_ns_instance_opdata.sh
new file mode 100755
index 0000000..582fec1
--- /dev/null
+++ b/rwlaunchpad/mock/get_ns_instance_opdata.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+#
+# Provide the RESTConf hostname as the argument or default to localhost
+
+if [ $# -eq 0 ] ; then
+    HOST=localhost
+else
+    HOST=$1
+fi
+
+echo "Getting NS instance opdata from $HOST"
+
+curl --user admin:admin \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -H "accept: application/vnd.yang.data+json" \
+    http://$HOST:8008/api/operational/ns-instance-opdata/
+
diff --git a/rwlaunchpad/mock/lp_mock_client.js b/rwlaunchpad/mock/lp_mock_client.js
new file mode 100644
index 0000000..6de0842
--- /dev/null
+++ b/rwlaunchpad/mock/lp_mock_client.js
@@ -0,0 +1,317 @@
+AUTOBAHN_DEBUG = true;
+var autobahn = require('autobahn');
+var uuid = require('node-uuid');
+var _ = require('lodash');
+
+// Our modules
+var dm = require('./data_model.js');
+
+
+var DUMP_RESULTS = false;
+
+// TODO: make the url be configurable via command line arg
+var connection = new autobahn.Connection({
+    url: 'ws://localhost:8090/ws',
+    realm: 'dts_mock'
+});
+
+// Instance of our data model/data store
+var dataModel = new dm.DataModel();
+
+var descriptor_module = (function () {
+
+    my = {};
+
+    /*
+     * This function sets descriptors in the dataModel
+     */
+    function on_config_descriptor_catalog(args) {
+        try {
+            var xpath = args[0];
+            var msg = args[1];
+
+            console.log("\n\n*** Got on_config_descriptor_catalog:\n    (xpath: %s)(msg: %j)", xpath, msg);
+
+            var descriptor_type = xpath.match(new RegExp(/(nsd|vnfd|vld)-catalog/))[1];
+
+            if (descriptor_type in msg) {
+                msg[descriptor_type].forEach(function(entry) {
+                    console.log('Assigning descriptor "%s" with id %s',
+                        descriptor_type, entry.id);
+                    if (descriptor_type == 'vnfd') {
+                        console.log('-- Adding VNFR data');
+                        dataModel.addVnfData(entry);
+                    } else {
+                        // Simply assign
+                        dataModel.setDescriptor(descriptor_type, entry);
+                    }
+                });
+            }
+        } catch(e) {
+            console.error("Caught exception: %s\n\n%s", e, e.stack);
+        }
+    }
+
+    my.register = function (session) {
+        console.log('Registering for descriptor handling');
+        session.subscribe('dts.config.nsd-catalog', on_config_descriptor_catalog);
+        session.subscribe('dts.config.vnfd-catalog', on_config_descriptor_catalog);
+        session.subscribe('dts.config.vld-catalog', on_config_descriptor_catalog);
+    };
+
+    return my;
+}());
+
+
+var instance_module = (function () {
+    my = {};
+
+   function on_config_config(args) {
+        try {
+            var xpath = args[0];
+            var msg = args[1];
+
+            console.log("\n\n*** Got on_config_config:\n    (xpath: %s)(msg: %j)", xpath, msg);
+
+            var record_type = xpath.match(new RegExp(/(ns|vnf|vl)-instance-config/))[1];
+            record_type += 'r';
+
+            console.log('record_type = %s', record_type);
+
+            if (record_type in msg) {
+                msg[record_type].forEach(function(entry) {
+                    console.log('Assigning record (%s) id=%s, descriptor: id=%s',
+                       record_type, entry.id, entry.nsd_ref);
+                    if (record_type == 'nsr') {
+                        dataModel.setNsInstanceConfig(entry);
+                    } else {
+                        // vnfd, vld, which don't have instance_config records yet
+                        dataModel.setConfigRecord(record_type, entry);
+                    }
+                });
+            }
+
+        } catch (e) {
+            console.error("Caught exception: %s\n\n%s", e, e.stack);
+        }
+    }
+
+    /*
+     * Get all nsr opdata records:
+     *   xpath: D,/nsr:ns-instance-opdata/nsr:nsr
+     *   msg: {"nsr":[{"ns_instance_config_ref":""}]}
+     *
+     * Get Ping Pong nsr opdata record:
+     *   xpath: D,/nsr:ns-instance-opdata/nsr:nsr[nsr:ns-instance-config-ref='f5f41f36-78f6-11e5-b9ba-6cb3113b406f']
+     *   msg: {"nsr":[{"ns_instance_config_ref":"f5f41f36-78f6-11e5-b9ba-6cb3113b406f"}]}
+     *
+     * Get monitoring param for nsr instance opdata record:
+     *   xpath: D,/nsr:ns-instance-opdata/nsr:nsr[nsr:ns-instance-config-ref='f5f41f36-78f6-11e5-b9ba-6cb3113b406f']
+     *   msg: {
+     *          "nsr":[{
+     *              "monitoring_param":[{"id":""}],
+     *              "ns_instance_config_ref":"f5f41f36-78f6-11e5-b9ba-6cb3113b406f"
+     *          }]}
+     *
+     * Note that the xpath arg is identical in getting the entire NSR and getting sub-elements in the NSR
+     * The message tells what values to get
+     */
+    function on_get_opdata(args) {
+        try {
+            var xpath = args[0];
+            var msg = args[1];
+            //console.log("\n\n*** Got on_get_opdata:\n   (xpath: %s)(msg: %j)", xpath, msg);
+            console.log("*** Got on_get_opdata:\n   (xpath: %s)(msg: %j)", xpath, msg);
+
+            var record_type = xpath.match(new RegExp(/(ns|vnf|vl)-instance-opdata/))[1];
+            record_type += 'r';
+
+            var gi_type_map = {
+                "nsr": "RwNsrYang.YangData_Nsr_NsInstanceOpdata",
+                "vnfr": "VnfrYang.YangData_Vnfr_VnfInstanceOpdata_Vnfr",
+                "vlr": "VlrYang.YangData_Vlr_VlInstanceOpdata_Vlr"
+            };
+
+            if (record_type == 'nsr') {
+                //console.log("###################\n   data model:\n\n");
+                //dataModel.prettyPrint();
+                var response = {
+                    'nsr': dataModel.getNsInstanceOpdata()
+                };
+                var respond_xpath = 'D,/nsr:ns-instance-opdata';
+            } else {
+                throw new dm.NotImplementedException(
+                        "record_type '%s' is not yet supported.", record_type);
+            }
+
+            var result = new autobahn.Result([
+                'RwNsrYang.YangData_Nsr_NsInstanceOpdata',
+                response
+            ], {"xpath": respond_xpath});
+
+            if (DUMP_RESULTS)
+                console.log("result=\n%s", JSON.stringify(result) );
+
+            return result;
+        } catch(e) {
+            console.error("Caught exception: %s\n\n%s", e, e.stack);
+        }
+    }
+
+    function on_get_vnfr_catalog(args) {
+        try {
+            var xpath = args[0];
+            var msg = args[1];
+            console.log("*** Got on_vnfr_catalog:\n   (xpath: %s)(msg: %j)", xpath, msg);
+
+            var response = {
+                'vnfr': dataModel.getVnfrs()
+            };
+            var respond_xpath = 'D,/vnfr:vnfr-catalog';
+
+            var result = new autobahn.Result([
+                'RwVnfrYang.YangData_Vnfr_VnfrCatalog',
+                response
+            ], {"xpath": respond_xpath});
+
+            if (DUMP_RESULTS)
+                console.log("result=\n%s", JSON.stringify(result) );
+
+            return result;
+        } catch(e) {
+            console.error("Caught exception: %s\n\n%s", e, e.stack);
+        }
+    }
+
+    my.register = function (session) {
+        console.log('Registering for record handling');
+        session.register('dts.data.ns-instance-opdata', on_get_opdata);
+        session.register('dts.data.vnfr-catalog', on_get_vnfr_catalog);
+        session.subscribe('dts.config.ns-instance-config', on_config_config);
+    }
+
+    return my;
+}());
+
+
+var action_module = (function() {
+    my = {};
+
+    /*
+     * Set the specified VNFR operating state
+     *
+     * (xpath: I,/lpmocklet:start-vnfr)
+     * (msg: {"id":"f26b90b0-8184-11e5-bc47-2b429643382b"})
+     */
+    function on_set_opstate(args) {
+        try {
+            var xpath = args[0];
+            var msg = args[1];
+
+            console.log("\n\n*** Got on_start_vnfr:\n    (xpath: %s)(msg: %j)",
+                xpath, msg);
+            var action_match = xpath.match(new RegExp(/lpmocklet:(\w+)-(\w+)/));
+            var action = action_match[1];
+            var obj_type = action_match[2];
+
+            var record_id = msg['id'];
+            console.log('action="%s", obj_type="%s", record_id="%s"',
+                    action, obj_type, record_id);
+
+            if (obj_type == 'vnfr') {
+                if (action == 'start') {
+                    dataModel.startVnfr(record_id);
+                }
+                else if (action == 'stop') {
+                    dataModel.stopVnfr(record_id);
+                }
+                else {
+                    console.error('Unsupported opstate action "%s"', action);
+                }
+            } else {
+                console.error('Unsupported opstate action object: "%s"',
+                        obj_type);
+            }
+
+            console.log('\n\nBuilding response....');
+
+            var response = {
+                id: uuid.v1(),
+                object_type: obj_type,
+                action: action,
+                status: 'SUCCESS' 
+            };
+            var respond_xpath = 'D,/lpmocklet:lpmocklet-action-status';
+            var result = new autobahn.Result([
+                    'LpmockletYang.YangData_Lpmocklet_LpmockletActionStatus',
+                    response
+                    ], {"xpath": respond_xpath});
+
+            console.log('Done running on_set_opdata');
+            return result;
+
+        } catch (e) {
+            console.error("Caught exception: %s\n\n%s", e, e.stack);
+        }
+    }
+
+    function on_set_control_param(args) {
+        try {
+            var xpath = args[0];
+            var msg = args[1];
+
+            console.log("\n\n*** Got on_set_control_param:\n    (xpath: %s)(msg: %j)",
+                xpath, msg);
+
+            // We can ignore xpath. We expect: "I,/lpmocklet:set-control-param"
+// msg: {"set":{"id":"f8d63b30-84b3-11e5-891c-61c6a71edd3c","obj_code":"VNFR","control_id":"ping-packet-size-1","value":10}}
+
+            var response_class = 'LpmockletYang.YangData_Lpmocklet_LpmockletActionStatus';
+            var status = dataModel.updateControlParam(
+                    msg.obj_code.toLowerCase(),
+                    msg.id,
+                    msg.control_id,
+                    msg.value);
+
+            var response = {
+                id: uuid.v1(),
+                object_type: msg.obj_code,
+                action: msg.control_id,
+                status: status
+            };
+
+            var respond_xpath = 'D,/lpmocklet:lpmocklet-action-status';
+            var result = new autobahn.Result([
+                    'LpmockletYang.YangData_Lpmocklet_LpmockletActionStatus',
+                    response
+                    ], {"xpath": respond_xpath});
+
+            console.log('Done running on_set_opdata');
+            return result;
+        } catch (e) {
+            console.error("Caught exception: %s\n\n%s", e, e.stack);
+        }
+    }
+
+    my.register = function(session) {
+        console.log('Registering for action handling');
+        session.register('dts.rpc.start-vnfr', on_set_opstate);
+        session.register('dts.rpc.stop-vnfr', on_set_opstate);
+        session.register('dts.rpc.set-control-param', on_set_control_param);
+    }
+
+    return my;
+
+}());
+
+
+connection.onopen = function (session) {
+    console.log('Connection to wamp server established!');
+    descriptor_module.register(session);
+    instance_module.register(session);
+    action_module.register(session);
+}
+
+console.log('Opening autobahn connection');
+connection.open();
+
diff --git a/rwlaunchpad/mock/package.json b/rwlaunchpad/mock/package.json
new file mode 100644
index 0000000..51e5d89
--- /dev/null
+++ b/rwlaunchpad/mock/package.json
@@ -0,0 +1,18 @@
+{
+  "name": "rw.lp.dts.mock",
+  "version": "1.0.0",
+  "description": "This is the node.js launchpad DTS mock client.",
+  "main": "lp_mock_client.js",
+  "scripts": {
+    "start": "node lp_mock_client"
+  },
+  "author": "JohnBaldwin",
+  "license": "Apache-2.0",
+  "dependencies": {
+    "autobahn": "~0.9.6",
+    "lodash": "~3.10.1",
+    "node-uuid": "~1.4.3",
+    "mocha": "~2.3.3"
+  },
+  "devDependencies": {}
+}
diff --git a/rwlaunchpad/mock/plugins/CMakeLists.txt b/rwlaunchpad/mock/plugins/CMakeLists.txt
new file mode 100644
index 0000000..a10246d
--- /dev/null
+++ b/rwlaunchpad/mock/plugins/CMakeLists.txt
@@ -0,0 +1,25 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs
+    yang
+    )
+
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
diff --git a/rwlaunchpad/mock/plugins/yang/CMakeLists.txt b/rwlaunchpad/mock/plugins/yang/CMakeLists.txt
new file mode 100644
index 0000000..2d8f2d9
--- /dev/null
+++ b/rwlaunchpad/mock/plugins/yang/CMakeLists.txt
@@ -0,0 +1,32 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+include(rift_yang)
+include(rift_plugin)
+
+rift_add_yang_target(
+    TARGET
+        lpmocklet_yang
+    YANG_FILES
+        lpmocklet.yang
+    COMPONENT
+        ${PKG_LONG_NAME}
+    LIBRARIES
+        mano-types_yang_gen
+)
+
diff --git a/rwlaunchpad/mock/plugins/yang/lpmocklet.tailf.yang b/rwlaunchpad/mock/plugins/yang/lpmocklet.tailf.yang
new file mode 100644
index 0000000..0579add
--- /dev/null
+++ b/rwlaunchpad/mock/plugins/yang/lpmocklet.tailf.yang
@@ -0,0 +1,50 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module lpmocklet-annotation
+{
+    namespace "http://riftio.com/ns/riftware-1.0/lpmocklet-annotation";
+    prefix "lpmocklet-ann";
+
+    import tailf-common {
+        prefix tailf;
+    }
+
+    import lpmocklet {
+        prefix lpmocklet;
+    }
+
+    tailf:annotate "/lpmocklet:lpmocklet-action-status" {
+        tailf:callpoint rw_callpoint;
+    }
+
+    tailf:annotate "/lpmocklet:set-control-param" {
+        tailf:actionpoint rw_actionpoint;
+    }
+
+    tailf:annotate "/lpmocklet:start-vnfr" {
+        tailf:actionpoint rw_actionpoint;
+    }
+
+    tailf:annotate "/lpmocklet:stop-vnfr" {
+        tailf:actionpoint rw_actionpoint;
+    }
+}
+
diff --git a/rwlaunchpad/mock/plugins/yang/lpmocklet.yang b/rwlaunchpad/mock/plugins/yang/lpmocklet.yang
new file mode 100644
index 0000000..819ee40
--- /dev/null
+++ b/rwlaunchpad/mock/plugins/yang/lpmocklet.yang
@@ -0,0 +1,111 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module lpmocklet
+{
+    namespace "http://riftio.com/ns/riftware-1.0/lpmocklet";
+    prefix "lpmocklet";
+
+    import rw-pb-ext {
+        prefix "rwpb";
+    }
+
+    import ietf-inet-types {
+        prefix "inet";
+    }
+
+    import ietf-yang-types {
+        prefix "yang";
+    }
+
+    import mano-types {
+        prefix "manotypes";
+    }
+
+    // Used for LP Mocklet RPC action-param and control-param responses
+    container lpmocklet-action-status {
+        config false;
+        leaf id {
+            type yang:uuid;
+        }
+        // TODO: Make this consistent with 'set-control-param' 'obj-code'
+        leaf object_type {
+            type string;
+        }
+        leaf action {
+            type string;
+        }
+        leaf status {
+            type string;
+        }
+    }
+
+    rpc set-control-param {
+        input {
+            leaf id {
+                description "object id";
+                type yang:uuid;
+                mandatory true;
+            }
+            leaf obj-code {
+                description "Type of object: NS, VNF";
+                type string;
+                mandatory true;
+            }
+            leaf control-id {
+                type string;
+                mandatory true;
+            }
+            // The new value to assign
+            leaf value {
+                type uint64;
+                mandatory true;
+            }
+        }
+    }
+
+    rpc start-vnfr {
+        input {
+            rwpb:msg-new "StartVnfrInput";
+            leaf id {
+                type yang:uuid;
+                mandatory true;
+            }
+        }
+        output {
+            rwpb:msg-new "StartVnfrOutput";
+            leaf status {
+                description "status of the start request";
+                type string;
+            }
+        }
+    }
+
+    rpc stop-vnfr {
+        input {
+            rwpb:msg-new "StopVnfr";
+            leaf id {
+                type yang:uuid;
+                mandatory true;
+            }
+        }
+    }
+}
+
diff --git a/rwlaunchpad/mock/set_data.sh b/rwlaunchpad/mock/set_data.sh
new file mode 100755
index 0000000..4a39c0a
--- /dev/null
+++ b/rwlaunchpad/mock/set_data.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+#
+# This script posts descriptor data (NSD, VNFD) and NS instance config to the RESTConf server
+#
+# Provide the RESTConf hostname as the argument or default to localhost
+#
+
+if [ $# -eq 0 ] ; then
+    HOST=localhost
+else
+    HOST=$1
+fi
+
+echo "Posting descriptor data to $HOST"
+
+
+#for descriptor in nsd vnfd vld
+
+for descriptor in nsd vnfd
+do
+    echo "Assigning data to descriptor \"$descriptor\""
+
+    curl --user admin:admin \
+        -H "Content-Type: application/vnd.yang.data+json" \
+        -X POST \
+        -d @data/${descriptor}_catalog.json \
+        http://$HOST:8008/api/running/$descriptor-catalog/ -v
+
+done
+
+for rectype in ns
+do
+    echo "Assigning data to instance config \"$rectype\""
+
+    curl --user admin:admin \
+        -H "Content-Type: application/vnd.yang.data+json" \
+        -X POST \
+        -d @data/${rectype}-instance-config.json \
+        http://$HOST:8008/api/running/$rectype-instance-config/ -v
+
+done
+
diff --git a/rwlaunchpad/mock/set_ping_pong.sh b/rwlaunchpad/mock/set_ping_pong.sh
new file mode 100755
index 0000000..11126bd
--- /dev/null
+++ b/rwlaunchpad/mock/set_ping_pong.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+#
+# This script posts ping-pong descriptor data (VNFD, NSD) and NS instance config to the RESTConf server
+#
+# Provide the RESTConf hostname as the argument or default to localhost
+#
+
+if [ $# -eq 0 ] ; then
+    HOST=localhost
+else
+    HOST=$1
+fi
+
+echo "Posting descriptor data to $HOST"
+
+for rectype in vnfd nsd
+do
+    echo "Assigning data to instance config \"$rectype\""
+
+    curl --user admin:admin \
+        -H "Content-Type: application/vnd.yang.data+json" \
+        -X POST \
+        -d @data/ping-pong-${rectype}.json \
+        http://$HOST:8008/api/running/$rectype-catalog/ -v
+
+    # Add sleep here if vnfd is not ready on server
+done
+
+curl --user admin:admin \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d @data/ping-pong-ns-instance-config.json \
+    http://$HOST:8008/api/running/ns-instance-config/ -v
+
diff --git a/rwlaunchpad/mock/simmp.js b/rwlaunchpad/mock/simmp.js
new file mode 100644
index 0000000..9d0628f
--- /dev/null
+++ b/rwlaunchpad/mock/simmp.js
@@ -0,0 +1,87 @@
+
+var _ = require('lodash');
+
+/*
+ * Args:
+ * rules - object with the monitoring param to simulator function mapping
+ *         see data/simmp.json
+ */
+SimMp = function(rules) {
+    this.rules = _.clone(rules, true);
+};
+
+//SimMp.prototype.map_rule = function(mp_id) {
+//    return this.rules['mp-mapper'][mp_id];
+//}
+
+// Use the monitoring param id for now
+SimMp.prototype.createSimMonitorFunc = function(mp) {
+
+    // Define our core simulation function here
+    //
+    // min, max inclusive
+    var rand_func = function(min, max) {
+        return Math.floor(Math.random() * (max-min+1)) + min;
+    }
+
+    var funcs = {
+        // transmit and receive rate
+        tx_rc_rate: function(value, elapsed_seconds) {
+            // Ignore elapsed time for first implementation of transmit and
+            // receive rate simulation.
+            // This is just a quick and dirty and simple implementation to make
+            // the monitoring params change, stay within bounds, and not swing
+            // wildly.
+            var min_val = mp.min_value;
+            var max_val = mp.max_value;
+            // Set an outer bound of maxmium change from current value
+            // Tweak bin_count to set how much the value can swing from the
+            // last value
+            var bin_count = 10;
+            // Set the range we can generate the new value based on a function
+            //  of the difference of the max and min values
+            var max_delta = (max_val - min_val) / bin_count;
+            console.log('Setting max_delta = %s', max_delta);
+            var new_val = rand_func(
+                    Math.max(min_val, value-max_delta),
+                    Math.min(max_val, value+max_delta));
+            //console.log("Generated value: %s", new_val);
+            return new_val;
+        },
+        packet_size: function(value, elapsed_seconds) {
+            // Stub method just returns value unchanged
+            // TODO: Figure out how we want to vary packet sizes
+            return value;
+        },
+        accumulate: function(value, elapsed_seconds) {
+            // NOT TESTED. Basic idea. Will want to add variablility
+            // how fast we accumulate
+            var accumulate_rate = 0.1;
+            var new_value = value + (elapsed_seconds * accumulate_rate);
+            return new_value;
+        }
+        // add growth function
+    };
+
+    // Declare our monitoring param id to sim function mapping here
+    // TODO: Move out to a yaml/json file and make this function belong to
+    // a 'Class'
+    //var mapper = {
+    //    'tx-rate-pp1': funcs['tx_rc_rate'],
+    //    'rc-rate-pp1': funcs['tx_rc_rate'] 
+    //};
+
+    var sim_func_name = this.rules['mp-mapper'][mp.id];
+    if (sim_func_name) {
+        return funcs[sim_func_name];
+    } else {
+        console.log('No time step sim function found for monitoring param with id "%s", using constant value', mp.id); 
+        return function(value, elapsed_seconds) {
+            return value;
+        }
+    }
+}
+
+module.exports = {
+    SimMp: SimMp
+};
diff --git a/rwlaunchpad/mock/test/test_simmp.js b/rwlaunchpad/mock/test/test_simmp.js
new file mode 100644
index 0000000..08833cf
--- /dev/null
+++ b/rwlaunchpad/mock/test/test_simmp.js
@@ -0,0 +1,28 @@
+var assert = require('assert');
+
+var simmp_module = require('../simmp.js');
+
+// This is an example test on SimMp. It is not a very good test, but shows
+// how to write a basic test in mocha
+describe('SimMp', function() {
+    describe('#createSimMonitorFunc()', function () {
+        it('should return tx_rc_rate', function () {
+            var mp = {
+                id: 'tx-rate-pp1',
+                min_value: 0,
+                max_value: 100,
+                current_value: 0
+            };
+            var simmp = new simmp_module.SimMp({
+                "mp-mapper": { "tx-rate-pp1": "tx_rc_rate" }
+            });
+            assert(simmp != null, 'Could not instantiate simmp');
+            var func = simmp.createSimMonitorFunc(mp);
+            var value = func(0);
+            assert(value >= mp.min_value, 'value less than min value);
+            assert(value <= mp.max_value, 'value greater than max value');
+
+       });
+    });
+});
+
diff --git a/rwlaunchpad/plugins/CMakeLists.txt b/rwlaunchpad/plugins/CMakeLists.txt
new file mode 100644
index 0000000..dfc3ce0
--- /dev/null
+++ b/rwlaunchpad/plugins/CMakeLists.txt
@@ -0,0 +1,41 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Author(s): Austin Cormier
+# Creation Date: 5/12/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs
+  cli
+  rwimagemgr
+  rwlaunchpadtasklet
+  rwautoscaler
+  rwmonitor
+  rwmonparam
+  rwnsm
+  rwresmgr
+  rwvnfm
+  rwvns
+  vala
+  yang
+  )
+
+##
+# Include the subdirs
+##
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/cli/CMakeLists.txt b/rwlaunchpad/plugins/cli/CMakeLists.txt
new file mode 100644
index 0000000..0819297
--- /dev/null
+++ b/rwlaunchpad/plugins/cli/CMakeLists.txt
@@ -0,0 +1,30 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 6/11/2016
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+##
+# Install the XML file
+##
+install(
+  FILES
+  cli_launchpad_schema_listing.txt
+  DESTINATION usr/data/manifest
+  COMPONENT ${PKG_LONG_NAME}
+)
diff --git a/rwlaunchpad/plugins/cli/cli_launchpad_schema_listing.txt b/rwlaunchpad/plugins/cli/cli_launchpad_schema_listing.txt
new file mode 100644
index 0000000..c64cff6
--- /dev/null
+++ b/rwlaunchpad/plugins/cli/cli_launchpad_schema_listing.txt
@@ -0,0 +1,55 @@
+ietf-inet-types
+ietf-l2-topology
+ietf-netconf-notifications
+ietf-network
+ietf-network-topology
+ietf-restconf-monitoring
+ietf-yang-types
+mano-types
+nsd
+nsr
+rw-base
+rwcal
+rw-cli-ext
+rw-cloud
+rw-config-agent
+rw-conman
+rw-debug
+rw-dts
+rw-dtsperf
+rw-dtsperfmgr
+rw-launchpad
+rw-image-mgmt
+rw-log
+rwlog-mgmt
+rw-manifest
+rw-memlog
+rw-mgmtagt
+rw-mgmt-schema
+rwmsg-data
+rw-netconf
+rw-restconf
+rw-notify-ext
+rw-nsd
+rw-nsm
+rw-nsr
+rw-pb-ext
+rw-resource-mgr
+rw-restportforward
+rwsdn
+rw-sdn
+rwshell-mgmt
+rw-sorch
+rw-topology
+rw-vcs
+rwvcs-types
+rw-vld
+rw-vlr
+rw-vnfd
+rw-vnfr
+rw-yang-types
+vld
+vlr
+vnfd
+vnffgd
+vnfr
diff --git a/rwlaunchpad/plugins/rwautoscaler/CMakeLists.txt b/rwlaunchpad/plugins/rwautoscaler/CMakeLists.txt
new file mode 100644
index 0000000..533588e
--- /dev/null
+++ b/rwlaunchpad/plugins/rwautoscaler/CMakeLists.txt
@@ -0,0 +1,41 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 2016/07/01
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwautoscaler)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+    rift/tasklets/${TASKLET_NAME}/engine.py
+    rift/tasklets/${TASKLET_NAME}/scaling_operation.py
+    rift/tasklets/${TASKLET_NAME}/subscribers.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/rwlaunchpad/plugins/rwautoscaler/Makefile b/rwlaunchpad/plugins/rwautoscaler/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwautoscaler/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upwards(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/__init__.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/__init__.py
new file mode 100644
index 0000000..3bdb192
--- /dev/null
+++ b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/__init__.py
@@ -0,0 +1 @@
+from .rwautoscaler import AutoScalerTasklet
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/engine.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/engine.py
new file mode 100644
index 0000000..d71aefc
--- /dev/null
+++ b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/engine.py
@@ -0,0 +1,422 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import abc
+import asyncio
+import time
+
+import numpy
+
+from . import scaling_operation
+from . import subscribers as monp_subscriber
+from gi.repository import RwDts as rwdts
+import rift.mano.dts as subscriber
+
+
+class TimeSeries:
+    """Convenience class to hold the data for the sliding window size.
+    """
+    def __init__(self, threshold_time):
+        """
+        Args:
+            threshold_time (int): window size in secs
+        """
+
+        # 0 -> contains a list of all timestamps
+        # 1 -> contains a list of all values.
+        self._series = numpy.empty(shape=(2, 1), dtype='int64')
+        self.threshold_time = threshold_time
+
+    def add_value(self, timestamp, value):
+        timestamp = int(timestamp)
+
+        self._series = numpy.append(
+                self._series,
+                [[timestamp], [value]],
+                axis=1)
+
+        # Drop off stale value
+        # 0 -> timestamp
+        # 1 -> values
+        # Get all indexes that are outside the window, and drop them
+        window_values = self._series[0] >= (timestamp - self.threshold_time)
+        self._series = self._series[:, window_values]
+
+    def average(self):
+        return numpy.average(self._series[1])
+
+    def is_window_full(self):
+        """Verify if there is sufficient data for the current window.
+        """
+        if len(self._series[0]) <= 2:
+            return False
+
+        start_time = self._series[0][0]
+        end_time = self._series[0][-1]
+
+        if (end_time - start_time) >= self.threshold_time:
+            return True
+
+        return False
+
+
+class ScalingCriteria:
+    class Delegate:
+        """Delegate: callbacks triggered by ScalingCriteris
+        """
+        @abc.abstractmethod
+        def threshold_out_breached(self, criteria_name, avg_value):
+            """Called when the value has crossed the scale-out-threshold
+
+            Args:
+                criteria_name (str): Criteria name
+                avg_value (float): The average value of the window.
+
+            """
+            pass
+
+        @abc.abstractmethod
+        def threshold_in_breached(self, criteria_name, avg_value):
+            """Called when the value has drops below the scale-in-threshold
+
+            Args:
+                criteria_name (str): Criteria name
+                avg_value (float): The average value of the window.
+
+            """
+
+            pass
+
+    def __init__(
+            self,
+            log,
+            dts,
+            loop,
+            nsr_id,
+            monp_id,
+            scaling_criteria,
+            window_size,
+            sampling_period=1,
+            delegate=None):
+        """
+        Args:
+            log : Log
+            dts : DTS handle
+            loop : Event Handle
+            nsr_id (str): NSR ID
+            monp_id (str): Monitoring parameter
+            scaling_criteria : Yang data model
+            window_size (int): Length of the window
+            delegate : ScalingCriteria.Delegate
+
+        Note:
+
+        """
+        self.log = log
+        self.dts = dts
+        self.loop = loop
+        self.sampling_period = sampling_period
+        self.window_size = window_size
+        self.delegate = delegate
+        self.nsr_id, self.monp_id = nsr_id, monp_id
+
+        self._scaling_criteria = scaling_criteria
+        self._timeseries = TimeSeries(self.window_size)
+        # Flag when set, triggers scale-in request.
+        self._scl_in_limit_enabled = False
+
+        self.nsr_monp_sub = monp_subscriber.NsrMonParamSubscriber(
+                self.log,
+                self.dts,
+                self.loop,
+                self.nsr_id,
+                self.monp_id,
+                callback=self.add_value)
+
+    @property
+    def name(self):
+        return self._scaling_criteria.name
+
+    @property
+    def scale_in(self):
+        return self._scaling_criteria.scale_in_threshold
+
+    @property
+    def scale_out(self):
+        return self._scaling_criteria.scale_out_threshold
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self.nsr_monp_sub.register()
+
+    def deregister(self):
+        self.nsr_monp_sub.deregister()
+
+    def trigger_action(self, timestamp, avg):
+        """Triggers the scale out/in
+
+        Args:
+            timestamp : time in unix epoch
+            avg : Average of all the values in the window size.
+
+        """
+        if self._timeseries.average() >= self.scale_out:
+            # Enable the scale in limit, only when a scale-out has happened.
+            self._scl_in_limit_enabled = True
+            self.delegate.threshold_out_breached(self.name, avg)
+
+        elif self._timeseries.average() < self.scale_in and self._scl_in_limit_enabled:
+            self._scl_in_limit_enabled = False
+            self.delegate.threshold_in_breached(self.name, avg)
+
+
+    def add_value(self, monp, action):
+        """Callback from NsrMonParamSubscriber
+
+        Args:
+            monp : Yang model
+            action : rwdts.QueryAction
+        """
+        if action == rwdts.QueryAction.DELETE:
+            return
+
+        value = monp.value_integer
+        timestamp = time.time()
+
+        self._timeseries.add_value(timestamp, value)
+
+        if not self._timeseries.is_window_full():
+            return
+
+        if not self.delegate:
+            return
+
+        self.trigger_action(timestamp, value)
+
+
+class ScalingPolicy(ScalingCriteria.Delegate):
+    class Delegate:
+        @abc.abstractmethod
+        def scale_in(self, scaling_group_name, nsr_id):
+            """Delegate called when all the criteria for scaling-in are met.
+
+            Args:
+                scaling_group_name (str): Description
+                nsr_id (str): Description
+
+            """
+            pass
+
+        @abc.abstractmethod
+        def scale_out(self, scaling_group_name, nsr_id):
+            """Delegate called when all the criteria for scaling-out are met.
+
+            Args:
+                scaling_group_name (str): Description
+                nsr_id (str): Description
+            """
+            pass
+
+    def __init__(
+            self,
+            log,
+            dts,
+            loop,
+            nsr_id,
+            nsd_id,
+            scaling_group_name,
+            scaling_policy,
+            store,
+            delegate=None):
+        """
+
+        Args:
+            log : Log
+            dts : DTS handle
+            loop : Event loop
+            nsr_id (str): NSR id
+            nsd_id (str): NSD id
+            scaling_group_name (str): Scaling group ref
+            scaling_policy : Yang model
+            store (SubscriberStore): Subscriber store instance
+            delegate (None, optional): ScalingPolicy.Delegate
+        """
+        self.loop = loop
+        self.log = log
+        self.dts = dts
+        self.nsd_id = nsd_id
+        self.nsr_id = nsr_id
+        self.scaling_group_name = scaling_group_name
+
+        self._scaling_policy = scaling_policy
+        self.delegate = delegate
+        self.store = store
+
+        self.monp_sub = monp_subscriber.NsrMonParamSubscriber(
+                                self.log,
+                                self.dts,
+                                self.loop,
+                                self.nsr_id,
+                                callback=self.handle_nsr_monp)
+
+        self.criteria_store = {}
+
+        # Timestamp at which the scale-in/scale-out request was generated.
+        self._last_triggered_time = None
+        self.scale_in_status = {cri.name: False for cri in self.scaling_criteria}
+        self.scale_out_status = {cri.name: False for cri in self.scaling_criteria}
+
+    def get_nsd_monp_cfg(self, nsr_monp):
+        """Get the NSD's mon-param config.
+        """
+        nsd = self.store.get_nsd(self.nsd_id)
+        for monp in nsd.monitoring_param:
+            if monp.id == nsr_monp.nsd_mon_param_ref:
+                return monp
+
+    def handle_nsr_monp(self, monp, action):
+        """Callback for NSR mon-param handler.
+
+        Args:
+            monp : Yang Model
+            action : rwdts.QueryAction
+        
+        """
+        def handle_create():
+            if monp.id in self.criteria_store:
+                return
+
+            nsd_monp = self.get_nsd_monp_cfg(monp)
+            for cri in self.scaling_criteria:
+                if cri.ns_monitoring_param_ref != nsd_monp.id:
+                    continue
+
+                # Create a criteria object as soon as the first monitoring data
+                # is published.
+                criteria = ScalingCriteria(
+                        self.log,
+                        self.dts,
+                        self.loop,
+                        self.nsr_id,
+                        monp.id,
+                        cri,
+                        self.threshold_time,  # window size
+                        delegate=self)
+
+                self.criteria_store[monp.id] = criteria
+
+                @asyncio.coroutine
+                def task():
+                    yield from criteria.register()
+
+                self.loop.create_task(task())
+
+        def handle_delete():
+            if monp.id in self.criteria_store:
+                self.criteria_store[monp.id].deregister()
+                del self.criteria_store[monp.id]
+
+        if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+            handle_create()
+        elif action == rwdts.QueryAction.DELETE:
+            handle_delete()
+
+
+    @property
+    def scaling_criteria(self):
+        return self._scaling_policy.scaling_criteria
+
+    @property
+    def scale_in_op(self):
+        optype = self._scaling_policy.scale_in_operation_type
+        return scaling_operation.get_operation(optype)
+
+    @property
+    def scale_out_op(self):
+        optype = self._scaling_policy.scale_out_operation_type
+        return scaling_operation.get_operation(optype)
+
+    @property
+    def name(self):
+        return self._scaling_policy.name
+
+    @property
+    def threshold_time(self):
+        return self._scaling_policy.threshold_time
+
+    @property
+    def cooldown_time(self):
+        return self._scaling_policy.cooldown_time
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self.monp_sub.register()
+
+    def deregister(self):
+        self.monp_sub.deregister()
+
+    def _is_in_cooldown(self):
+        """Verify if the current policy is in cooldown.
+        """
+        if not self._last_triggered_time:
+            return False
+
+        if (time.time() - self._last_triggered_time) >= self.cooldown_time:
+            return False
+
+        return True
+
+    def threshold_in_breached(self, criteria_name, value):
+        """Delegate callback when scale-in threshold is breached
+
+        Args:
+            criteria_name : Criteria name
+            value : Average value
+        """
+        if self._is_in_cooldown():
+            return
+
+        self.scale_in_status[criteria_name] = True
+
+        statuses = self.scale_in_status.values()
+        is_breached = self.scale_in_op(statuses)
+
+        if is_breached and self.delegate:
+            self._last_triggered_time = time.time()
+            # Reset all statuses
+            self.scale_in_status = {cri.name: False for cri in self.scaling_criteria}
+            self.delegate.scale_in(self.scaling_group_name, self.nsr_id)
+
+    def threshold_out_breached(self, criteria_name, value):
+        """Delegate callback when scale-out threshold is breached.
+        Args:
+            criteria_name : Criteria name
+            value : Average value
+        """
+        if self._is_in_cooldown():
+            return
+
+        self.scale_out_status[criteria_name] = True
+
+        statuses = self.scale_out_status.values()
+        is_breached = self.scale_out_op(statuses)
+
+        if is_breached and self.delegate:
+            self._last_triggered_time = time.time()
+            # Reset all statuses
+            self.scale_out_status = {cri.name: False for cri in self.scaling_criteria}
+            self.delegate.scale_out(self.scaling_group_name, self.nsr_id)
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/rwautoscaler.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/rwautoscaler.py
new file mode 100644
index 0000000..affa579
--- /dev/null
+++ b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/rwautoscaler.py
@@ -0,0 +1,230 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file rwautoscaler.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 01-Jul-2016
+
+"""
+import asyncio
+import collections
+
+from . import engine
+from . import subscribers as monp_subscriber
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('NsrYang', '1.0')
+
+from gi.repository import (
+        RwDts as rwdts,
+        NsrYang,
+        RwLaunchpadYang,
+        ProtobufC)
+import rift.mano.cloud
+import rift.mano.dts as subscriber
+import rift.tasklets
+
+
+
+class AutoScalerTasklet(rift.tasklets.Tasklet, engine.ScalingPolicy.Delegate):
+    """The main task of this Tasklet is to listen for NSR changes and once the
+    NSR is configured, ScalingPolicy is created.
+    """
+    def __init__(self, *args, **kwargs):
+
+        try:
+            super().__init__(*args, **kwargs)
+            self.store = None
+            self.monparam_store = None
+
+            self.nsr_sub = None
+            self.nsr_monp_subscribers = {}
+            self.instance_id_store = collections.defaultdict(list)
+
+        except Exception as e:
+            self.log.exception(e)
+
+    def start(self):
+        super().start()
+
+        self.log.debug("Registering with dts")
+
+        self.dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                RwLaunchpadYang.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        self.store = subscriber.SubscriberStore.from_tasklet(self)
+        self.nsr_sub = subscriber.NsrCatalogSubscriber(self.log, self.dts, self.loop, self.handle_nsr)
+
+        self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+    def stop(self):
+        try:
+            self.dts.deinit()
+        except Exception as e:
+            self.log.exception(e)
+
+    @asyncio.coroutine
+    def init(self):
+        self.log.debug("creating vnfr subscriber")
+        yield from self.store.register()
+        yield from self.nsr_sub.register()
+
+    @asyncio.coroutine
+    def run(self):
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Handle DTS state change
+
+        Take action according to current DTS state to transition application
+        into the corresponding application state
+
+        Arguments
+            state - current dts state
+
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self.dts.handle.set_state(next_state)
+
+    def scale_in(self, scaling_group_name, nsr_id):
+        """Delegate callback
+
+        Args:
+            scaling_group_name (str): Scaling group name to be scaled in
+            nsr_id (str): NSR id
+
+        """
+        self.log.info("Sending a scaling-in request for {} in NSR: {}".format(
+                scaling_group_name,
+                nsr_id))
+
+        @asyncio.coroutine
+        def _scale_in():
+            instance_id = self.instance_id_store[(scaling_group_name, nsr_id)].pop()
+
+            # Trigger an rpc
+            rpc_ip = NsrYang.YangInput_Nsr_ExecScaleIn.from_dict({
+                'nsr_id_ref': nsr_id,
+                'instance_id': instance_id,
+                'scaling_group_name_ref': scaling_group_name})
+
+            rpc_out = yield from self.dts.query_rpc(
+                        "/nsr:exec-scale-in",
+                        0,
+                        rpc_ip)
+
+        self.loop.create_task(_scale_in())
+
+    def scale_out(self, scaling_group_name, nsr_id):
+        """Delegate callback for scale out requests
+
+        Args:
+            scaling_group_name (str): Scaling group name
+            nsr_id (str): NSR ID
+        """
+        self.log.info("Sending a scaling-out request for {} in NSR: {}".format(
+                scaling_group_name,
+                nsr_id))
+
+        @asyncio.coroutine
+        def _scale_out():
+            # Trigger an rpc
+            rpc_ip = NsrYang.YangInput_Nsr_ExecScaleOut.from_dict({
+                'nsr_id_ref': nsr_id ,
+                'scaling_group_name_ref': scaling_group_name})
+
+            itr = yield from self.dts.query_rpc("/nsr:exec-scale-out", 0, rpc_ip)
+
+            key = (scaling_group_name, nsr_id)
+            for res in itr:
+                result = yield from res
+                rpc_out = result.result
+                self.instance_id_store[key].append(rpc_out.instance_id)
+
+                self.log.info("Created new scaling group {} with instance id {}".format(
+                        scaling_group_name,
+                        rpc_out.instance_id))
+
+        self.loop.create_task(_scale_out())
+
+
+    def handle_nsr(self, nsr, action):
+        """Callback for NSR opdata changes. Creates a publisher for every
+        NS that moves to config state.
+
+        Args:
+            nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): Ns Opdata
+            action (rwdts.QueryAction): Action type of the change.
+        """
+        def nsr_create():
+            if nsr.config_status == "configured" and nsr.ns_instance_config_ref not in self.nsr_monp_subscribers:
+                nsr_id = nsr.ns_instance_config_ref
+                self.nsr_monp_subscribers[nsr_id] = []
+                nsd = self.store.get_nsd(nsr.nsd_ref)
+                @asyncio.coroutine
+                def task():
+                    for scaling_group in nsd.scaling_group_descriptor:
+                        for policy_cfg in scaling_group.scaling_policy:
+                            policy = engine.ScalingPolicy(
+                                self.log, self.dts, self.loop,
+                                nsr.ns_instance_config_ref,
+                                nsr.nsd_ref,
+                                scaling_group.name,
+                                policy_cfg,
+                                self.store,
+                                delegate=self)
+                            self.nsr_monp_subscribers[nsr_id].append(policy)
+                            yield from policy.register()
+
+                self.loop.create_task(task())
+
+
+        def nsr_delete():
+            if nsr.ns_instance_config_ref in self.nsr_monp_subscribers:
+                policies = self.nsr_monp_subscribers[nsr.ns_instance_config_ref]
+                for policy in policies:
+                    policy.deregister()
+                del self.nsr_monp_subscribers[nsr.ns_instance_config_ref]
+
+        if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+            nsr_create()
+        elif action == rwdts.QueryAction.DELETE:
+            nsr_delete()
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/scaling_operation.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/scaling_operation.py
new file mode 100644
index 0000000..c5ffb3c
--- /dev/null
+++ b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/scaling_operation.py
@@ -0,0 +1,41 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import abc
+
+def get_operation(operation):
+
+    op_map = {"AND": AndScalingOperation(),
+              "OR": OrScalingOperation()}
+
+    return op_map[operation]
+
+
+class ScalingOperation:
+    @abc.abstractmethod
+    def __call__(self, statuses):
+        pass
+
+
+class AndScalingOperation():
+    def __call__(self, statuses):
+        return all(statuses)
+
+
+class OrScalingOperation():
+    def __call__(self, statuses):
+        return any(statuses)
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/subscribers.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/subscribers.py
new file mode 100644
index 0000000..04185b6
--- /dev/null
+++ b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/subscribers.py
@@ -0,0 +1,40 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import rift.mano.dts as mano_dts
+
+
+class NsrMonParamSubscriber(mano_dts.AbstractOpdataSubscriber):
+    """Registers for NSR monitoring parameter changes.
+    
+    Attributes:
+        monp_id (str): Monitoring Param ID
+        nsr_id (str): NSR ID
+    """
+    def __init__(self, log, dts, loop, nsr_id, monp_id=None, callback=None):
+        super().__init__(log, dts, loop, callback)
+        self.nsr_id = nsr_id
+        self.monp_id = monp_id
+
+    def get_xpath(self):
+        return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+            "[nsr:ns-instance-config-ref='{}']".format(self.nsr_id) +
+            "/nsr:monitoring-param" +
+            ("[nsr:id='{}']".format(self.monp_id) if self.monp_id else ""))
+
+
+
diff --git a/rwlaunchpad/plugins/rwautoscaler/rwautoscaler.py b/rwlaunchpad/plugins/rwautoscaler/rwautoscaler.py
new file mode 100644
index 0000000..7fc24ad
--- /dev/null
+++ b/rwlaunchpad/plugins/rwautoscaler/rwautoscaler.py
@@ -0,0 +1,25 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwautoscaler
+
+class Tasklet(rift.tasklets.rwautoscaler.AutoScalerTasklet):
+    # Thin alias so the plugin loader finds a class named "Tasklet"; the
+    # real implementation lives in rift.tasklets.rwautoscaler (see the
+    # RIFT-6485 workaround note above).
+    pass
diff --git a/rwlaunchpad/plugins/rwautoscaler/test/utest_autoscaler_dts.py b/rwlaunchpad/plugins/rwautoscaler/test/utest_autoscaler_dts.py
new file mode 100644
index 0000000..78342ce
--- /dev/null
+++ b/rwlaunchpad/plugins/rwautoscaler/test/utest_autoscaler_dts.py
@@ -0,0 +1,350 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import asyncio
+import os
+import sys
+import unittest
+import random
+
+import xmlrunner
+import unittest.mock as mock
+
+import rift.test.dts
+import rift.tasklets.rwautoscaler.engine as engine
+import gi
+gi.require_version('RwDtsYang', '1.0')
+from gi.repository import (
+        RwNsrYang,
+        NsrYang,
+        NsdYang,
+        RwLaunchpadYang as launchpadyang,
+        RwVnfrYang,
+        RwVnfdYang,
+        RwNsdYang
+        )
+
+
+ScalingCriteria = NsdYang.YangData_Nsd_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy_ScalingCriteria
+ScalingPolicy = NsdYang.YangData_Nsd_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy
+
+
class MockDelegate(engine.ScalingCriteria.Delegate):
    """Test double recording how often each scaling callback fires."""

    def __init__(self):
        self.scale_in_called = 0
        self.scale_out_called = 0

    def scale_in(self, name, val):
        banner = "============================================="
        print (banner)
        print ("Scaling IN")
        print (banner)
        self.scale_in_called += 1

    def scale_out(self, name, val):
        banner = "============================================="
        print (banner)
        print ("Scaling OUT")
        print (banner)
        self.scale_out_called += 1
+
+
class MockStore():
    """Callable factory building a MagicMock descriptor/instance "store".

    The produced mock exposes get_vnfd/get_vnfr/get_nsr/get_nsd and an
    ``nsr`` list, pre-populated with a two-VNF NS whose NSD carries one
    "http" scaling group with two scaling criteria.

    Args:
        aggregation_type (str): aggregation used by the NS monitoring params.
        legacy (bool): when True the NSD carries no NS-level monitoring
            params (pre-monitoring-param style descriptors).
    """
    def __init__(self, aggregation_type="AVERAGE", legacy=False):
        self.aggregation_type = aggregation_type
        self.legacy = legacy
        # Samples needed before a policy may trigger; tests publish
        # threshold_time + 1 values per criteria.
        self.threshold_time = 3

    def __call__(self):
        """Build and return a freshly configured mock store."""
        store = mock.MagicMock()

        # VNFD with two HTTP-scraped counters (ping tx / rx).
        mock_vnfd =  RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
            'id': "1",
            'monitoring_param': [
                {'description': 'no of ping requests',
                 'group_tag': 'Group1',
                 'http_endpoint_ref': 'api/v1/ping/stats',
                 'id': '1',
                 'json_query_method': 'NAMEKEY',
                 'name': 'ping-request-tx-count',
                 'units': 'packets',
                 'value_type': 'INT',
                 'widget_type': 'COUNTER'},
                {'description': 'no of ping responses',
                 'group_tag': 'Group1',
                 'http_endpoint_ref': 'api/v1/ping/stats',
                 'id': '2',
                 'json_query_method': 'NAMEKEY',
                 'name': 'ping-response-rx-count',
                 'units': 'packets',
                 'value_type': 'INT',
                 'widget_type': 'COUNTER'}],
            })

        store.get_vnfd = mock.MagicMock(return_value=mock_vnfd)

        # Single VNFR referencing the VNFD above.
        mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({
            'id': '1',
            'vnfd_ref': '1',
            })
        store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)

        # NSR in configured state, tying the VNFR into the NS instance.
        mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
            'ns_instance_config_ref': "1",
            'name_ref': "Foo",
            'nsd_ref': '1',
            'config_status': 'configured',
            'constituent_vnfr_ref': [{'vnfr_id': mock_vnfr.id}],
            })

        store.get_nsr = mock.MagicMock(return_value=mock_nsr)
        store.nsr = [mock_nsr]

        # Two NS-level monitoring params, each aggregating both VNFD counters.
        # NOTE(review): both entries carry name 'ping-request-tx-count'; the
        # second one presumably should be 'ping-response-rx-count' -- confirm.
        monp_cfg = [{'aggregation_type': self.aggregation_type,
                 'id': '1',
                 'name': 'ping-request-tx-count',
                 'value_type': 'INT',
                 'vnfd_monitoring_param': [
                    {'vnfd_id_ref': '1',
                     'vnfd_monitoring_param_ref': '1'},
                    {'vnfd_id_ref': '1',
                     'vnfd_monitoring_param_ref': '2'}]
                },
                {'aggregation_type': self.aggregation_type,
                 'id': '2',
                 'name': 'ping-request-tx-count',
                 'value_type': 'INT',
                 'vnfd_monitoring_param': [
                    {'vnfd_id_ref': '1',
                     'vnfd_monitoring_param_ref': '1'},
                    {'vnfd_id_ref': '1',
                     'vnfd_monitoring_param_ref': '2'}]
                }]

        # Thresholds shared by both scaling criteria below; tests publish
        # values strictly above/below these to force triggers.
        scale_in_val = 100
        scale_out_val = 200

        # NSD with one "http" scaling group (criteria AND-ed by default).
        mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict({
            'id': '1',
            'monitoring_param': (monp_cfg if not self.legacy else []),
            'constituent_vnfd': [{'member_vnf_index': 1,
                 'start_by_default': True,
                 'vnfd_id_ref': '1'},
                {'member_vnf_index': 2,
                 'start_by_default': True,
                 'vnfd_id_ref': '1'}],
            'scaling_group_descriptor': [{
                    "name": "http",
                    "vnfd_member": [{
                        'member_vnf_index_ref': 1,
                    }],
                    "scaling_policy": [{
                        "scaling_type": "automatic",
                        "enabled": True,
                        "threshold_time": self.threshold_time,
                        "cooldown_time": 60,
                        "scale_out_operation_type": "AND",
                        "scale_in_operation_type": "AND",
                        "scaling_criteria": [{
                            "name": "1",
                            "scale_in_threshold": scale_in_val,
                            "scale_out_threshold": scale_out_val,
                            "ns_monitoring_param_ref": "1"
                        },
                        {
                            "name": "2",
                            "scale_in_threshold": scale_in_val,
                            "scale_out_threshold": scale_out_val,
                            "ns_monitoring_param_ref": "2"
                        }]
                    }]
                }]
            })

        store.get_nsd = mock.MagicMock(return_value=mock_nsd)

        return store
+
+
class AutoscalarDtsTestCase(rift.test.dts.AbstractDTSTest):
    """DTS-level tests for the autoscaler ScalingPolicy engine.

    Publishes mocked NS monitoring-param samples over DTS and asserts that
    the policy fires its delegate's scale_in/scale_out callbacks according
    to the configured AND/OR criteria operators.
    """
    @classmethod
    def configure_schema(cls):
        # Launchpad composite schema covers the nsr modules published below.
        return launchpadyang.get_schema()

    @classmethod
    def configure_timeout(cls):
        # Generous: every publish loop sleeps 1s per sample.
        return 240

    def configure_test(self, loop, test_id):
        """Create the main and subscriber DTS handles plus the mock store."""
        self.log.debug("STARTING - %s", test_id)
        self.tinfo = self.new_tinfo(str(test_id))
        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)

        self.tinfo_sub = self.new_tinfo(str(test_id) + "_sub")
        self.dts_sub = rift.tasklets.DTS(self.tinfo_sub, self.schema, self.loop)

        self.mock_store = MockStore()

    def tearDown(self):
        super().tearDown()

    @asyncio.coroutine
    def _populate_mock_values(self, criterias, nsr_id, floor, ceil):
        """Publish threshold_time + 1 random samples in [floor, ceil] for
        the NS monitoring param referenced by each criteria.
        """
        # Mock publish
        # Verify Scale in AND operator
        NsMonParam = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam

        publisher = rift.test.dts.DescriptorPublisher(self.log, self.dts, self.loop)

        for criteria in criterias:
            monp_id = criteria.ns_monitoring_param_ref
            # w_xpath: wildcard registration path; xpath: fully-keyed publish path.
            w_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr"
            w_xpath = w_xpath + "[nsr:ns-instance-config-ref='{}']/nsr:monitoring-param".format(nsr_id)
            xpath =  w_xpath + "[nsr:id ='{}']".format(monp_id)

            # One extra sample so the policy sees a full threshold window.
            for i in range(self.mock_store.threshold_time + 1):
                value = random.randint(floor, ceil)

                monp = NsMonParam.from_dict({
                        'id': monp_id,
                        'value_integer': value,
                        'nsd_mon_param_ref': monp_id})

                yield from publisher.publish(w_xpath, xpath, monp)
                yield from asyncio.sleep(1)

    @rift.test.dts.async_test
    def test_scale_in(self):
        """Scale-in behavior.

        Asserts:
            1. No scale-in before any scale-out happened.
            2. AND operator: both criteria below threshold -> scale-in.
            3. AND operator: only one criteria below threshold -> no scale-in.
            4. OR operator: one criteria below threshold -> scale-in.
        """
        store = self.mock_store()

        # CFG: fixtures shared by every sub-test below.
        floor, ceil = 0, 100
        nsr_id = store.get_nsr().ns_instance_config_ref
        policy_cfg = store.get_nsd().scaling_group_descriptor[0].scaling_policy[0]
        scaling_name = store.get_nsd().scaling_group_descriptor[0].name


        def make_policy():
            # Fresh policy wired to the current mock_delegate (closed over).
            policy = engine.ScalingPolicy(
                    self.log, self.dts, self.loop,
                    store.get_nsr().ns_instance_config_ref, store.get_nsd().id,
                    scaling_name, policy_cfg, store, delegate=mock_delegate)

            return policy

        @asyncio.coroutine
        def scale_out(policy):
            # Drive values above scale_out_threshold so a later scale-in is legal.
            yield from self._populate_mock_values(policy.scaling_criteria, nsr_id, 200, 300)
            # HACK TO RESET THE COOLING TIME
            policy._last_triggered_time = 0

        # Test 1: Scale in shouldn't be called, unless a scale-out happens
        mock_delegate = MockDelegate()
        policy = make_policy()
        yield from policy.register()
        yield from self._populate_mock_values(policy.scaling_criteria, nsr_id, floor, ceil)
        assert mock_delegate.scale_in_called == 0

        # Test 2: AND operation (reuses the policy/delegate from Test 1)
        yield from scale_out(policy)
        yield from self._populate_mock_values(policy.scaling_criteria, nsr_id, floor, ceil)
        assert mock_delegate.scale_in_called == 1

        # Test 3: AND operation failure -- only one criteria drops below
        mock_delegate = MockDelegate()
        policy = make_policy()
        yield from policy.register()
        yield from scale_out(policy)
        yield from self._populate_mock_values([policy.scaling_criteria[0]], nsr_id, floor, ceil)
        assert mock_delegate.scale_in_called == 0


        # Test 4: OR operation -- a single criteria suffices
        mock_delegate = MockDelegate()
        policy = make_policy()
        policy_cfg.scale_in_operation_type = "OR"
        yield from policy.register()
        yield from scale_out(policy)
        yield from self._populate_mock_values([policy.scaling_criteria[0]], nsr_id, floor, ceil)
        assert mock_delegate.scale_in_called == 1

    # NOTE(review): the leading underscore hides this from unittest discovery,
    # so it never runs -- presumably disabled deliberately; confirm before
    # renaming to test_scale_out.
    @rift.test.dts.async_test
    def _test_scale_out(self):
        """ Tests scale out

        Asserts:
            1. Scale out
            2. Scale out doesn't happen during cooldown
            3. AND operation 
            4. OR operation.
        """
        store = self.mock_store()

        # CFG: fixtures shared by every sub-test below.
        floor, ceil = 200, 300
        nsr_id = store.get_nsr().ns_instance_config_ref
        policy_cfg = store.get_nsd().scaling_group_descriptor[0].scaling_policy[0]
        scaling_name = store.get_nsd().scaling_group_descriptor[0].name


        def make_policy():
            # Fresh policy wired to the current mock_delegate (closed over).
            policy = engine.ScalingPolicy(
                    self.log, self.dts, self.loop,
                    store.get_nsr().ns_instance_config_ref, store.get_nsd().id,
                    scaling_name, policy_cfg, store, delegate=mock_delegate)

            return policy

        # Test 1: Scale out should be called only when both the criteria are
        # exceeding.
        mock_delegate = MockDelegate()
        policy = make_policy()
        yield from policy.register()
        yield from self._populate_mock_values(policy.scaling_criteria, nsr_id, floor, ceil)
        assert mock_delegate.scale_out_called == 1

        # Test 2: Assert if Scale out doesn't happen when only one exceeds
        mock_delegate = MockDelegate()
        policy = make_policy()
        yield from policy.register()
        yield from self._populate_mock_values([policy.scaling_criteria[0]], nsr_id, floor, ceil)
        assert mock_delegate.scale_out_called == 0

        # Test 3: OR operation -- one exceeding criteria suffices
        mock_delegate = MockDelegate()
        policy_cfg.scale_out_operation_type = "OR"
        policy = make_policy()
        yield from policy.register()
        yield from  self._populate_mock_values([policy.scaling_criteria[0]], nsr_id, floor, ceil)
        assert mock_delegate.scale_out_called == 1
+
+
def main():
    """Parse runner options and run the unittest suite.

    ``-n/--no-runner`` runs with unittest's default runner; otherwise an
    xmlrunner writing to $RIFT_MODULE_TEST is used.  Unknown arguments are
    forwarded to unittest.
    """
    parser = argparse.ArgumentParser()
    # -v is accepted for interface compatibility; unittest's own -v can be
    # passed through via unittest_args.
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('-n', '--no-runner', action='store_true')
    args, unittest_args = parser.parse_known_args()

    runner = None
    if not args.no_runner:
        # Bug fix: previously the runner (and the RIFT_MODULE_TEST env
        # lookup, a KeyError when unset) ran before argument parsing, so
        # even --no-runner invocations required the env var.  Build the
        # runner lazily, only when it will actually be used.
        runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])

    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)

if __name__ == '__main__':
    main()
diff --git a/rwlaunchpad/plugins/rwimagemgr/CMakeLists.txt b/rwlaunchpad/plugins/rwimagemgr/CMakeLists.txt
new file mode 100644
index 0000000..58b3429
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/CMakeLists.txt
@@ -0,0 +1,94 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 2016/06/23
+# 
+
include(rift_plugin)

set(TASKLET_NAME rwimagemgrtasklet)

##
# Create the install target for the tasklet plugin artifact
##
rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)

# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
# install the plugin implementation in site-packages and then
# import it from the actual plugin.
rift_python_install_tree(
  FILES
    rift/tasklets/rwimagemgr/__init__.py
    rift/tasklets/rwimagemgr/glance_client.py
    rift/tasklets/rwimagemgr/glance_proxy_server.py
    rift/tasklets/rwimagemgr/tasklet.py
    rift/tasklets/rwimagemgr/upload.py
    rift/tasklets/rwimagemgr/lib/__init__.py
    rift/tasklets/rwimagemgr/lib/quickproxy/__init__.py
    rift/tasklets/rwimagemgr/lib/quickproxy/proxy.py
  COMPONENT ${PKG_LONG_NAME}
  PYTHON3_ONLY)

# Client-side helper library for requesting image uploads.
rift_python_install_tree(
  FILES
    rift/imagemgr/__init__.py
    rift/imagemgr/client.py
  COMPONENT ${PKG_LONG_NAME}
  PYTHON3_ONLY)

install(
    PROGRAMS
        bin/glance_start_wrapper
    DESTINATION
        usr/bin
    COMPONENT ${PKG_LONG_NAME}
    )

# Install the platform-specific glance configuration templates.
# Bug fix: the $ENV{RIFT_PLATFORM} expansions are now quoted -- unquoted, an
# unset/empty environment variable made the if() expression malformed instead
# of falling through to the FATAL_ERROR below.  (The env var is read once, at
# configure time.)
if("$ENV{RIFT_PLATFORM}" MATCHES "fc20")
  install(
      FILES
          etc/fc20/glance-api.conf
          etc/fc20/glance-registry.conf
          etc/fc20/glance-scrubber.conf
          etc/fc20/glance-cache.conf
          etc/fc20/policy.json
          etc/fc20/schema-image.json
          etc/fc20/glance-api-dist-paste.ini
      DESTINATION
          etc/glance
      COMPONENT ${PKG_LONG_NAME}
      )
elseif("$ENV{RIFT_PLATFORM}" MATCHES "ub16")
  install(
      FILES
          etc/ub16/glance-api.conf
          etc/ub16/glance-api-paste.ini
          etc/ub16/glance-registry.conf
          etc/ub16/glance-registry-paste.ini
          etc/ub16/glance-cache.conf
          etc/ub16/glance-manage.conf
          etc/ub16/policy.json
          etc/ub16/schema-image.json
      DESTINATION
          etc/glance
      COMPONENT ${PKG_LONG_NAME}
      )
else()
  message(FATAL_ERROR "Unknown platform $ENV{RIFT_PLATFORM}")
endif()

rift_add_subdirs(test)
diff --git a/rwlaunchpad/plugins/rwimagemgr/Makefile b/rwlaunchpad/plugins/rwimagemgr/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
##
# Define a Makefile function: find_upward(filename)
# (note: defined as find_upward, singular -- callers must use that name)
#
# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
# until the file is found or the root directory is reached.
# Expands to the first match, or the empty string when nothing is found.
##
find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))

##
# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
# (the quotes are consumed by the shell inside find_upward's $(shell ...))
##
makefile.top := $(call find_upward, "Makefile.top")

##
# If Makefile.top was found, then include it.
# NOTE(review): if nothing was found the variable expands empty and this
# `include` silently becomes a no-op statement -- confirm that is intended.
##
include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwimagemgr/bin/glance_start_wrapper b/rwlaunchpad/plugins/rwimagemgr/bin/glance_start_wrapper
new file mode 100755
index 0000000..3294aee
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/bin/glance_start_wrapper
@@ -0,0 +1,131 @@
+#!/bin/bash
+#
+# A single executable which starts necessary glance server components
+#
+# Create a workspace-specific glance config directory and
+# wrap the glance-api and glance-registry procs.
+#
+#
+# USAGE: ./glance_start_wrapper <glance_conf_dir>
+#
+#
+
if [ $# -ne 1 ]; then
    # Bug fix: corrected "specifiy" typo in the usage error.
    echo "error: specify the glance conf dir"
    exit 1
fi

# Template config directory shipped with the package (see the rwimagemgr
# CMake install rules).
src_conf_dir="$1"
if [ ! -d "${src_conf_dir}" ]; then
    echo "error: glance conf dir does not exist"
    exit 1
fi

# RIFT_INSTALL must at least be set (possibly empty); bail out when unset.
if [ -z ${RIFT_INSTALL+x} ]; then
    echo "error: RIFT_INSTALL is not set"
    exit 1
fi

# Derive RIFT_VAR_ROOT when the caller did not provide one.  The mktemp
# fallback is only reachable when RIFT_INSTALL is set but empty (the unset
# case already exited above).
if [ -z "${RIFT_VAR_ROOT}" ]; then
    if [ -n "${RIFT_INSTALL}" ]; then
        RIFT_VAR_ROOT="${RIFT_INSTALL}/var"
    else
        RIFT_VAR_ROOT="$(mktemp -d)"
        echo "warning: RIFT_VAR_ROOT or RIFT_INSTALL not provided, using temporary directory"
    fi
fi

dest_conf_dir="${RIFT_VAR_ROOT}/glance/conf"
echo "destination glance conf directory: ${dest_conf_dir}"

if [ -e "${dest_conf_dir}" ]; then
    # NOTE(review): the purge below is disabled upstream -- existing files
    # are overwritten one-by-one by the copy loop instead.  Bug fix: the old
    # "removing ..." message claimed the directory was deleted when it was
    # not; report what actually happens.
    echo "reusing existing ${dest_conf_dir}"
    #rm -rf "${dest_conf_dir}"
fi

mkdir -p "${dest_conf_dir}"

# Copy each template into the workspace and substitute the {RIFT_VAR_ROOT}
# and {RIFT_INSTALL} placeholders with this workspace's values.
for conf_file in "${src_conf_dir}"/*; do
    cp "${conf_file}" "${dest_conf_dir}/"
    dest_file="${dest_conf_dir}/$(basename "${conf_file}")"
    sed -i "s|{RIFT_VAR_ROOT}|${RIFT_VAR_ROOT}|g" "${dest_file}"
    sed -i "s|{RIFT_INSTALL}|${RIFT_INSTALL}|g" "${dest_file}"
done

mkdir -p "${RIFT_VAR_ROOT}/log/glance"
+
# PIDs of the backgrounded glance daemons; 0 means "not started yet" and is
# checked before signalling below.
registry_pid=0
api_pid=0
# Re-entrancy guard: the traps can fire while kill_children is already running.
killing=false

# Stop both glance children: polite TERM first, then a KILL escalation after
# a 2-second grace period.  Always exits the script with status 1.
function kill_children(){
    if ${killing}; then
        return
    fi
    killing=true

    if [ ${registry_pid} -ne 0 ]; then
        kill ${registry_pid} 2>/dev/null
    fi

    if [ ${api_pid} -ne 0 ]; then
        kill ${api_pid} 2>/dev/null
    fi

    # Grace period before escalating to SIGKILL.
    sleep 2

    if [ ${registry_pid} -ne 0 ]; then
        echo "KILL registry pid: ${registry_pid}"
        kill -9 ${registry_pid} 2>/dev/null
    fi

    if [ ${api_pid} -ne 0 ]; then
        echo "KILL api pid: ${api_pid}"
        kill -9 ${api_pid} 2>/dev/null
    fi

    exit 1
}


# Tear down the entire process group when a child dies.
# NOTE(review): kill_children ends in `exit 1`, so `kill -9 0` is reached
# only when a kill is already in progress (killing=true).  SIGCHLD also
# fires when the backgrounded glance-manage db_sync completes normally --
# confirm this is the intended lifecycle.
function kill_group(){
    # Kill any remaining children
    kill_children

    # Kill myself
    kill -9 0
}

# Clean up children on termination signals and on normal exit.
trap "kill_children" SIGHUP SIGINT SIGTERM SIGTRAP EXIT
trap "kill_group" SIGCHLD
+
# Launch the registry daemon in the background, logging suppressed.
glance-registry --config-dir ${dest_conf_dir} --config-file ${dest_conf_dir}/glance-registry.conf >/dev/null 2>&1&
registry_pid="$!"
# NOTE(review): $? here reflects the preceding assignment (always 0), not
# the health of the backgrounded daemon; a `kill -0 ${registry_pid}` probe
# after a short delay would make this check meaningful.
if [ $? -ne 0 ]; then
    echo "ERROR: Glance registry startup failed!" >&2
    exit 1
fi

# Launch the API daemon in the background.
glance-api --config-dir ${dest_conf_dir} --config-file ${dest_conf_dir}/glance-api.conf >/dev/null 2>&1&
api_pid="$!"
# Same dead-check caveat as above.
if [ $? -ne 0 ]; then
    # Bug fix: this branch previously reported "Glance registry startup
    # failed!" even though it concerns glance-api.
    echo "ERROR: Glance API startup failed!" >&2
    exit 1
fi

# Give the daemons time to come up before the schema sync.
sleep 5

manage_cfg=""
if [ -e "${dest_conf_dir}/glance-manage.conf" ]; then
    manage_cfg="--config-file ${dest_conf_dir}/glance-manage.conf"
fi

# Create/migrate the glance database schema.
# NOTE(review): db_sync is backgrounded, so the $? test below never sees its
# real exit status, and its completion raises SIGCHLD (see kill_group above)
# -- confirm intended behavior.
glance-manage --config-dir ${dest_conf_dir} ${manage_cfg} db_sync >/dev/null 2>&1&
if [ $? -ne 0 ]; then
    echo "ERROR: glance-manage db_sync failed" >&2
    exit 1
fi

# Keep the wrapper alive so the traps manage the children's lifetime.
while true; do
    sleep 1
done
diff --git a/rwlaunchpad/plugins/rwimagemgr/bin/upload_image.py b/rwlaunchpad/plugins/rwimagemgr/bin/upload_image.py
new file mode 100755
index 0000000..3870c50
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/bin/upload_image.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import asyncio
+import logging
+import sys
+
+from rift.tasklets.rwimagemgr import tasklet, glance_client
+from rift.mano.cloud import accounts
+
+import gi
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwLog', '1.0')
+from gi.repository import (
+        RwCloudYang,
+        RwLog,
+        )
+
# Lab OpenStack endpoint and credentials used by this manual helper script.
# NOTE(review): real-looking credentials and a site-specific auth URL are
# checked into source -- consider sourcing these from the environment or CLI
# arguments instead.
openstack_info = {
        'username': 'pluto',
        'password': 'mypasswd',
        'project_name': 'demo',
        'auth_url': 'http://10.66.4.18:5000/v3',
        'mgmt_network': 'private'
        }
+
+
def create_account(log):
    """Build a CloudAccount for the hard-coded openstack_info settings.

    Args:
        log: logger handed to the CloudAccount wrapper.

    Returns:
        rift.mano.cloud.accounts.CloudAccount named "openstack".
    """
    # Map the flat openstack_info dict onto the yang account message
    # (username->key, password->secret, project_name->tenant).
    account_msg = RwCloudYang.CloudAccount.from_dict(dict(
        name="openstack",
        account_type="openstack",
        openstack=dict(
            key=openstack_info["username"],
            secret=openstack_info["password"],
            tenant=openstack_info["project_name"],
            auth_url=openstack_info["auth_url"]
            )
        )
    )

    account = accounts.CloudAccount(
            log,
            RwLog.Ctx.new(__file__),
            account_msg
            )

    return account
+
+
def parse_args(argv=sys.argv[1:]):
    """Parse command-line arguments for the image upload helper.

    Args:
        argv: argument list to parse; defaults to sys.argv[1:].

    Returns:
        argparse.Namespace with image_name and image_checksum attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--image-name", required=True)
    parser.add_argument("--image-checksum", required=True)

    # Bug fix: the argv parameter was previously ignored (parse_args() with
    # no arguments always re-read sys.argv); pass it through so callers can
    # supply an explicit argument list.
    return parser.parse_args(argv)
+
+
def main():
    """Upload one image (by name and checksum) via the local glance client.

    Side effects: connects to glance at 127.0.0.1:9292 and to the OpenStack
    account built from openstack_info; blocks until all upload tasks finish.
    """
    args = parse_args()
    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger("upload_image.py")
    loop = asyncio.get_event_loop()
    cloud_account = create_account(log)
    # NOTE(review): "test" is passed as the auth token -- presumably the
    # local endpoint does not validate it; confirm.
    client = glance_client.OpenstackGlanceClient.from_token(
            log, "127.0.0.1", 9292, "test"
            )
    task_creator = tasklet.GlanceClientUploadTaskCreator(
            log, loop, {"openstack": cloud_account}, client,
            )

    # One upload task per account name in the list (here just "openstack").
    tasks = loop.run_until_complete(
            task_creator.create_tasks(
                ["openstack"],
                args.image_name,
                args.image_checksum
                )
            )

    log.debug("Created tasks: %s", tasks)

    log.debug("uploading images")
    # Run every task's start() coroutine to completion before returning.
    loop.run_until_complete(asyncio.wait([t.start() for t in tasks], loop=loop))


if __name__ == "__main__":
    main()
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-api-dist-paste.ini b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-api-dist-paste.ini
new file mode 100644
index 0000000..4f8f659
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-api-dist-paste.ini
@@ -0,0 +1,72 @@
+# Use this pipeline for no auth or image caching - DEFAULT
+[pipeline:glance-api]
+pipeline = versionnegotiation unauthenticated-context rootapp
+
+# Use this pipeline for image caching and no auth
+[pipeline:glance-api-caching]
+pipeline = versionnegotiation unauthenticated-context cache rootapp
+
+# Use this pipeline for caching w/ management interface but no auth
+[pipeline:glance-api-cachemanagement]
+pipeline = versionnegotiation unauthenticated-context cache cachemanage rootapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-api-keystone]
+pipeline = versionnegotiation authtoken context rootapp
+
+# Use this pipeline for keystone auth with image caching
+[pipeline:glance-api-keystone+caching]
+pipeline = versionnegotiation authtoken context cache rootapp
+
+# Use this pipeline for keystone auth with caching and cache management
+[pipeline:glance-api-keystone+cachemanagement]
+pipeline = versionnegotiation authtoken context cache cachemanage rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-api-trusted-auth]
+pipeline = versionnegotiation context rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user and uses cache management
+[pipeline:glance-api-trusted-auth+cachemanagement]
+pipeline = versionnegotiation context cache cachemanage rootapp
+
+[composite:rootapp]
+paste.composite_factory = glance.api:root_app_factory
+/: apiversions
+/v1: apiv1app
+/v2: apiv2app
+
+[app:apiversions]
+paste.app_factory = glance.api.versions:create_resource
+
+[app:apiv1app]
+paste.app_factory = glance.api.v1.router:API.factory
+
+[app:apiv2app]
+paste.app_factory = glance.api.v2.router:API.factory
+
+[filter:versionnegotiation]
+paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
+
+[filter:cache]
+paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
+
+[filter:cachemanage]
+paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
+delay_auth_decision = true
+
+[filter:gzip]
+paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-api.conf b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-api.conf
new file mode 100644
index 0000000..4f11820
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-api.conf
@@ -0,0 +1,446 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose=True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug=True
+
+# Which backend scheme should Glance use by default if one is not specified
+# in a request to add a new image to Glance? Known schemes are determined
+# by the known_stores option below.
+# Default: 'file'
+default_store = file
+
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+#known_stores = glance.store.filesystem.Store,
+#               glance.store.http.Store,
+#               glance.store.rbd.Store,
+#               glance.store.s3.Store,
+#               glance.store.swift.Store,
+#               glance.store.sheepdog.Store,
+#               glance.store.cinder.Store,
+
+
+# Maximum image size (in bytes) that may be uploaded through the
+# Glance API server. Defaults to 1 TB.
+# WARNING: this value should only be increased after careful consideration
+# and must be set to a value under 8 EB (9223372036854775808).
+#image_size_cap = 1099511627776
+
+# Address to bind the API server
+bind_host = 0.0.0.0
+
+# Port to bind the API server to
+bind_port = 9292
+
+# Log to this file. Make sure you do not set the same log
+# file for both the API and registry servers!
+log_file={RIFT_VAR_ROOT}/log/glance/api.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package, it is also possible to use: glance.db.registry.api
+data_api = glance.db.sqlalchemy.api
+
+# SQLAlchemy connection string for the reference implementation
+# registry server. Any valid SQLAlchemy connection string is fine.
+# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
+#sql_connection=mysql://glance:glance@localhost/glance
+sql_connection=sqlite:///{RIFT_VAR_ROOT}/glance/glance-api.db
+
+# Period in seconds after which SQLAlchemy should reestablish its connection
+# to the database.
+#
+# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
+# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
+# notice this, you can lower this value to ensure that SQLAlchemy reconnects
+# before MySQL can drop the connection.
+sql_idle_timeout = 3600
+
+# Number of Glance API worker processes to start.
+# On machines with more than one CPU increasing this value
+# may improve performance (especially if using SSL with
+# compression turned on). It is typically recommended to set
+# this value to the number of CPUs present on your machine.
+workers = 1
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware.
+allow_anonymous_access = True
+
+# Allow access to version 1 of glance api
+enable_v1_api = True
+
+# Allow access to version 2 of glance api
+enable_v2_api = True
+
+# Return the URL that references where the data is stored on
+# the backend storage system.  For example, if using the
+# file system store a URL of 'file:///path/to/image' will
+# be returned to the user in the 'direct_url' meta-data field.
+# The default value is false.
+#show_image_direct_url = False
+
+# Send headers containing user and tenant information when making requests to
+# the v1 glance registry. This allows the registry to function as if a user is
+# authenticated without the need to authenticate a user itself using the
+# auth_token middleware.
+# The default value is false.
+#send_identity_headers = False
+
+# Supported values for the 'container_format' image attribute
+container_formats=ami,ari,aki,bare,ovf
+
+# Supported values for the 'disk_format' image attribute
+disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso
+
+# Directory to use for lock files. Default to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+#
+# Property Protections config file
+# This file contains the rules for property protections and the roles
+# associated with it.
+# If this config value is not specified, by default, property protections
+# won't be enforced.
+# If a value is specified and the file is not found, then an
+# HTTPInternalServerError will be thrown.
+#property_protection_file =
+
+# Set a system wide quota for every user.  This value is the total number
+# of bytes that a user can use across all storage systems.  A value of
+# 0 means unlimited.
+#user_storage_quota = 0
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL0
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting API server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+# ============ Registry Options ===============================
+
+# Address to find the registry server
+registry_host = 0.0.0.0
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# What protocol to use when connecting to the registry server?
+# Set to https for secure HTTP communication
+registry_client_protocol = http
+
+# The path to the key file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
+#registry_client_key_file = /path/to/key/file
+
+# The path to the cert file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
+#registry_client_cert_file = /path/to/cert/file
+
+# The path to the certifying authority cert file to use in SSL connections
+# to the registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
+#registry_client_ca_file = /path/to/ca/file
+
+# When using SSL in connections to the registry server, do not require
+# validation via a certifying authority. This is the registry's equivalent of
+# specifying --insecure on the command line using glanceclient for the API
+# Default: False
+#registry_client_insecure = False
+
+# The period of time, in seconds, that the API server will wait for a registry
+# request to complete. A value of '0' implies no timeout.
+# Default: 600
+#registry_client_timeout = 600
+
+# Whether to automatically create the database tables.
+# Default: False
+db_auto_create = True
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+sqlalchemy_debug = True
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when images are created, updated or deleted.
+# There are four methods of sending notifications: logging (via the
+# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid
+# message queue), or noop (no notifications sent, the default)
+notifier_strategy=noop
+
+# Configuration options if sending notifications via rabbitmq (these are
+# the defaults)
+rabbit_host = localhost
+rabbit_port = 5672
+rabbit_use_ssl = false
+rabbit_userid = guest
+rabbit_password = guest
+rabbit_virtual_host = /
+rabbit_notification_exchange = glance
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
+
+# Configuration options if sending notifications via Qpid (these are
+# the defaults)
+qpid_notification_exchange = glance
+qpid_notification_topic = notifications
+qpid_hostname = localhost
+qpid_port = 5672
+qpid_username =
+qpid_password =
+qpid_sasl_mechanisms =
+qpid_reconnect_timeout = 0
+qpid_reconnect_limit = 0
+qpid_reconnect_interval_min = 0
+qpid_reconnect_interval_max = 0
+qpid_reconnect_interval = 0
+#qpid_heartbeat=60
+# Set to 'ssl' to enable SSL
+qpid_protocol = tcp
+qpid_tcp_nodelay = True
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+filesystem_store_datadir={RIFT_VAR_ROOT}/glance/images/
+
+# A path to a JSON file that contains metadata describing the storage
+# system.  When show_multiple_locations is True the information in this
+# file will be returned with any location that is contained in this
+# store.
+#filesystem_store_metadata_file = None
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified,  default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that the account should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# What size, in MB, should Glance start chunking image files
+# and do a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# If set to True enables multi-tenant storage mode which causes Glance images
+# to be stored in tenant specific Swift accounts.
+#swift_store_multi_tenant = False
+
+# A list of swift ACL strings that will be applied as both read and
+# write ACLs to the containers created by Glance in multi-tenant
+# mode. This grants the specified tenants/users read and write access
+# to all newly created image objects. The standard swift ACL string
+# formats are allowed, including:
+# <tenant_id>:<username>
+# <tenant_name>:<username>
+# *:<username>
+# Multiple ACLs can be combined using a comma separated list, for
+# example: swift_store_admin_tenants = service:glance,*:admin
+#swift_store_admin_tenants =
+
+# The region of the swift endpoint to be used for single tenant. This setting
+# is only necessary if the tenant has multiple swift endpoints.
+#swift_store_region =
+
+# If set to False, disables SSL layer compression of https swift requests.
+# Setting to 'False' may improve performance for images which are already
+# in a compressed format, eg qcow2. If set to True, enables SSL layer
+# compression (provided it is supported by the target swift proxy).
+#swift_store_ssl_compression = True
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified,  default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Container within the account that the account should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+#s3_store_object_buffer_dir = /path/to/dir
+
+# When forming a bucket url, boto will either set the bucket name as the
+# subdomain or as the first token of the path. Amazon's S3 service will
+# accept it as the subdomain, but Swift's S3 middleware requires it be
+# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
+#s3_store_bucket_url_format = subdomain
+
+# ============ RBD Store Options =============================
+
+# Ceph configuration file path
+# If using cephx authentication, this file should
+# include a reference to the right keyring
+# in a client.<USER> section
+rbd_store_ceph_conf = /etc/ceph/ceph.conf
+
+# RADOS user to authenticate as (only applicable if using cephx)
+rbd_store_user = glance
+
+# RADOS pool in which images are stored
+rbd_store_pool = images
+
+# Images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+rbd_store_chunk_size = 8
+
+# ============ Sheepdog Store Options =============================
+
+sheepdog_store_address = localhost
+
+sheepdog_store_port = 7000
+
+# Images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+sheepdog_store_chunk_size = 64
+
+# ============ Cinder Store Options ===============================
+
+# Info to match when looking for cinder in the service catalog
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of ca certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ Delayed Delete Options =============================
+
+# Turn on/off delayed delete
+delayed_delete = False
+
+# Delayed delete time in seconds
+scrub_time = 43200
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-scrubber.conf
+scrubber_datadir={RIFT_VAR_ROOT}/glance/scrubber
+
+# =============== Image Cache Options =============================
+
+# Base directory that the Image Cache uses
+image_cache_dir={RIFT_VAR_ROOT}/glance/image-cache/
+
+[keystone_authtoken]
+#auth_host=127.0.0.1
+#auth_port=35357
+#auth_protocol=http
+#admin_tenant_name=%SERVICE_TENANT_NAME%
+#admin_user=%SERVICE_USER%
+#admin_password=%SERVICE_PASSWORD%
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+config_file={RIFT_INSTALL}/etc/glance/glance-api-dist-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-api-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor=
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-cache.conf b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-cache.conf
new file mode 100644
index 0000000..904eb7f
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-cache.conf
@@ -0,0 +1,168 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose=True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug=False
+
+log_file={RIFT_VAR_ROOT}/log/glance/image-cache.log
+
+# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
+#use_syslog = False
+
+# Directory that the Image Cache writes data to
+image_cache_dir={RIFT_VAR_ROOT}/glance/image-cache/
+
+# Number of seconds after which we should consider an incomplete image to be
+# stalled and eligible for reaping
+image_cache_stall_time = 86400
+
+# image_cache_invalid_entry_grace_period - seconds
+#
+# If an exception is raised as we're writing to the cache, the cache-entry is
+# deemed invalid and moved to <image_cache_datadir>/invalid so that it can be
+# inspected for debugging purposes.
+#
+# This is the number of seconds to leave these invalid images around before
+# they are eligible to be reaped.
+image_cache_invalid_entry_grace_period = 3600
+
+# Max cache size in bytes
+image_cache_max_size = 10737418240
+
+# Address to find the registry server
+registry_host = 127.0.0.1
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# Auth settings if using Keystone
+# auth_url = http://127.0.0.1:5000/v2.0/
+# admin_tenant_name = %SERVICE_TENANT_NAME%
+# admin_user = %SERVICE_USER%
+# admin_password = %SERVICE_PASSWORD%
+
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+# known_stores = glance.store.filesystem.Store,
+#                glance.store.http.Store,
+#                glance.store.rbd.Store,
+#                glance.store.s3.Store,
+#                glance.store.swift.Store,
+#                glance.store.sheepdog.Store,
+#                glance.store.cinder.Store,
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+filesystem_store_datadir = {RIFT_VAR_ROOT}/glance/images/
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified,  default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that the account should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# What size, in MB, should Glance start chunking image files
+# and do a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified,  default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Container within the account that the account should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+# s3_store_object_buffer_dir = /path/to/dir
+
+# ============ Cinder Store Options ===========================
+
+# Info to match when looking for cinder in the service catalog
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of ca certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+# metadata_encryption_key = <16, 24 or 32 char registry metadata key>
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-registry.conf b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-registry.conf
new file mode 100644
index 0000000..2529d1c
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-registry.conf
@@ -0,0 +1,100 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose=True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug=True
+
+# Address to bind the registry server
+bind_host = 0.0.0.0
+
+# Port to bind the registry server to
+bind_port = 9191
+
+# Log to this file. Make sure you do not set the same log
+# file for both the API and registry servers!
+log_file={RIFT_VAR_ROOT}/log/glance/glance-registry.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package.
+data_api = glance.db.sqlalchemy.api
+
+# SQLAlchemy connection string for the reference implementation
+# registry server. Any valid SQLAlchemy connection string is fine.
+# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
+#sql_connection=mysql://glance:glance@localhost/glance
+sql_connection=sqlite:///{RIFT_VAR_ROOT}/glance/glance-registry.db
+
+# Period in seconds after which SQLAlchemy should reestablish its connection
+# to the database.
+#
+# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
+# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
+# notice this, you can lower this value to ensure that SQLAlchemy reconnects
+# before MySQL can drop the connection.
+sql_idle_timeout = 3600
+
+# Limit the api to return `param_limit_max` items in a call to a container. If
+# a larger `limit` query param is provided, it will be reduced to this value.
+api_limit_max = 1000
+
+# If a `limit` query param is not provided in an api request, it will
+# default to `limit_param_default`
+limit_param_default = 25
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Whether to automatically create the database tables.
+# Default: False
+db_auto_create = True
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+sqlalchemy_debug = True
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL1
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting registry server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting registry server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+[keystone_authtoken]
+#auth_host=127.0.0.1
+#auth_port=35357
+#auth_protocol=http
+#admin_tenant_name=%SERVICE_TENANT_NAME%
+#admin_user=%SERVICE_USER%
+#admin_password=%SERVICE_PASSWORD%
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+config_file=/usr/share/glance/glance-registry-dist-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-registry-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor=
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-scrubber.conf b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-scrubber.conf
new file mode 100644
index 0000000..70b693b
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-scrubber.conf
@@ -0,0 +1,53 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose=True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug=False
+
+# Log to this file. Make sure you do not set the same log
+# file for both the API and registry servers!
+log_file={RIFT_VAR_ROOT}/log/glance/scrubber.log
+
+# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
+#use_syslog = False
+
+# Should we run our own loop or rely on cron/scheduler to run us
+daemon = False
+
+# Loop time between checking for new items to schedule for delete
+wakeup_time = 300
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-api.conf
+scrubber_datadir={RIFT_VAR_ROOT}/glance/scrubber
+
+# Only one server in your deployment should be designated the cleanup host
+cleanup_scrubber = False
+
+# pending_delete items older than this time are candidates for cleanup
+cleanup_scrubber_time = 86400
+
+# Address to find the registry server for cleanups
+registry_host = 0.0.0.0
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# Auth settings if using Keystone
+# auth_url = http://127.0.0.1:5000/v2.0/
+# admin_tenant_name = %SERVICE_TENANT_NAME%
+# admin_user = %SERVICE_USER%
+# admin_password = %SERVICE_PASSWORD%
+
+# Directory to use for lock files. Default to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/policy.json b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/policy.json
new file mode 100644
index 0000000..248b27e
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/policy.json
@@ -0,0 +1,5 @@
+{
+    "context_is_admin":  "role:admin",
+    "default": "",
+    "manage_image_cache": "role:admin"
+}
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/schema-image.json b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/schema-image.json
new file mode 100644
index 0000000..5aafd6b
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/schema-image.json
@@ -0,0 +1,28 @@
+{
+    "kernel_id": {
+        "type": "string",
+        "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+        "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
+    },
+    "ramdisk_id": {
+        "type": "string",
+        "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+        "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
+    },
+    "instance_uuid": {
+        "type": "string",
+        "description": "ID of instance used to create this image."
+    },
+    "architecture": {
+        "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+        "type": "string"
+    },
+    "os_distro": {
+        "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+        "type": "string"
+    },
+    "os_version": {
+        "description": "Operating system version as specified by the distributor",
+        "type": "string"
+    }
+}
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-api-paste.ini b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-api-paste.ini
new file mode 100644
index 0000000..9efd19f
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-api-paste.ini
@@ -0,0 +1,87 @@
+# Use this pipeline for no auth or image caching - DEFAULT
+[pipeline:glance-api]
+pipeline = cors healthcheck versionnegotiation osprofiler unauthenticated-context rootapp
+
+# Use this pipeline for image caching and no auth
+[pipeline:glance-api-caching]
+pipeline = cors healthcheck versionnegotiation osprofiler unauthenticated-context cache rootapp
+
+# Use this pipeline for caching w/ management interface but no auth
+[pipeline:glance-api-cachemanagement]
+pipeline = cors healthcheck versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-api-keystone]
+pipeline = cors healthcheck versionnegotiation osprofiler authtoken context  rootapp
+
+# Use this pipeline for keystone auth with image caching
+[pipeline:glance-api-keystone+caching]
+pipeline = cors healthcheck versionnegotiation osprofiler authtoken context cache rootapp
+
+# Use this pipeline for keystone auth with caching and cache management
+[pipeline:glance-api-keystone+cachemanagement]
+pipeline = cors healthcheck versionnegotiation osprofiler authtoken context cache cachemanage rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-api-trusted-auth]
+pipeline = cors healthcheck versionnegotiation osprofiler context rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user and uses cache management
+[pipeline:glance-api-trusted-auth+cachemanagement]
+pipeline = cors healthcheck versionnegotiation osprofiler context cache cachemanage rootapp
+
+[composite:rootapp]
+paste.composite_factory = glance.api:root_app_factory
+/: apiversions
+/v1: apiv1app
+/v2: apiv2app
+
+[app:apiversions]
+paste.app_factory = glance.api.versions:create_resource
+
+[app:apiv1app]
+paste.app_factory = glance.api.v1.router:API.factory
+
+[app:apiv2app]
+paste.app_factory = glance.api.v2.router:API.factory
+
+[filter:healthcheck]
+paste.filter_factory = oslo_middleware:Healthcheck.factory
+backends = disable_by_file
+disable_by_file_path = /etc/glance/healthcheck_disable
+
+[filter:versionnegotiation]
+paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
+
+[filter:cache]
+paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
+
+[filter:cachemanage]
+paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+delay_auth_decision = true
+
+[filter:gzip]
+paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY  #DEPRECATED
+enabled = yes  #DEPRECATED
+
+[filter:cors]
+paste.filter_factory =  oslo_middleware.cors:filter_factory
+oslo_config_project = glance
+oslo_config_program = glance-api
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-api.conf b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-api.conf
new file mode 100644
index 0000000..65e2e8d
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-api.conf
@@ -0,0 +1,1783 @@
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+[DEFAULT]
+
+#
+# From glance.api
+#
+
+# When true, this option sets the owner of an image to be the tenant.
+# Otherwise, the owner of the image will be the authenticated user
+# issuing the request. (boolean value)
+#owner_is_tenant = true
+
+# Role used to identify an authenticated user as administrator.
+# (string value)
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware. (boolean
+# value)
+allow_anonymous_access = True
+
+# Limits request ID length. (integer value)
+#max_request_id_length = 64
+
+# Public url to use for versions endpoint. The default is None, which
+# will use the request's host_url attribute to populate the URL base.
+# If Glance is operating behind a proxy, you will want to change this
+# to represent the proxy's URL. (string value)
+#public_endpoint = <None>
+
+# Whether to allow users to specify image properties beyond what the
+# image schema provides (boolean value)
+#allow_additional_image_properties = true
+
+# Maximum number of image members per image. Negative values evaluate
+# to unlimited. (integer value)
+#image_member_quota = 128
+
+# Maximum number of properties allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_property_quota = 128
+
+# Maximum number of tags allowed on an image. Negative values evaluate
+# to unlimited. (integer value)
+#image_tag_quota = 128
+
+# Maximum number of locations allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_location_quota = 10
+
+# Python module path of data access API (string value)
+data_api = glance.db.sqlalchemy.api
+
+# Default value for the number of items returned by a request if not
+# specified explicitly in the request (integer value)
+#limit_param_default = 25
+
+# Maximum permissible number of items that could be returned by a
+# request (integer value)
+#api_limit_max = 1000
+
+# Whether to include the backend image storage location in image
+# properties. Revealing storage location can be a security risk, so
+# use this setting with caution! (boolean value)
+#show_image_direct_url = false
+
+# Whether to include the backend image locations in image properties.
+# For example, if using the file system store a URL of
+# "file:///path/to/image" will be returned to the user in the
+# 'direct_url' meta-data field. Revealing storage location can be a
+# security risk, so use this setting with caution! Setting this to
+# true overrides the show_image_direct_url option. (boolean value)
+#show_multiple_locations = false
+
+# Maximum size of image a user can upload in bytes. Defaults to
+# 1099511627776 bytes (1 TB).WARNING: this value should only be
+# increased after careful consideration and must be set to a value
+# under 8 EB (9223372036854775808). (integer value)
+# Maximum value: 9223372036854775808
+#image_size_cap = 1099511627776
+
+# Set a system wide quota for every user. This value is the total
+# capacity that a user can use across all storage systems. A value of
+# 0 means unlimited. Optional unit can be specified for the value.
+# Accepted units are B, KB, MB, GB and TB representing Bytes,
+# KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no
+# unit is specified then Bytes is assumed. Note that there should not
+# be any space between value and unit and units are case sensitive.
+# (string value)
+#user_storage_quota = 0
+
+# Deploy the v1 OpenStack Images API. (boolean value)
+enable_v1_api = true
+
+# Deploy the v2 OpenStack Images API. (boolean value)
+enable_v2_api = true
+
+# Deploy the v1 OpenStack Registry API. (boolean value)
+enable_v1_registry = true
+
+# Deploy the v2 OpenStack Registry API. (boolean value)
+enable_v2_registry = true
+
+# The hostname/IP of the pydev process listening for debug connections
+# (string value)
+#pydev_worker_debug_host = <None>
+
+# The port on which a pydev process is listening for connections.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#pydev_worker_debug_port = 5678
+
+# AES key for encrypting store 'location' metadata. This includes, if
+# used, Swift or S3 credentials. Should be set to a random string of
+# length 16, 24 or 32 bytes (string value)
+#metadata_encryption_key = <None>
+
+# Digest algorithm which will be used for digital signature. Use the
+# command "openssl list-message-digest-algorithms" to get the
+# available algorithms supported by the version of OpenSSL on the
+# platform. Examples are "sha1", "sha256", "sha512", etc. (string
+# value)
+#digest_algorithm = sha256
+
+# This value sets what strategy will be used to determine the image
+# location order. Currently two strategies are packaged with Glance
+# 'location_order' and 'store_type'. (string value)
+# Allowed values: location_order, store_type
+#location_strategy = location_order
+
+# The location of the property protection file. This file contains the
+# rules for property protections and the roles/policies associated
+# with it. If this config value is not specified, by default, property
+# protections won't be enforced. If a value is specified and the file
+# is not found, then the glance-api service will not start. (string
+# value)
+#property_protection_file = <None>
+
+# This config value indicates whether "roles" or "policies" are used
+# in the property protection file. (string value)
+# Allowed values: roles, policies
+#property_protection_rule_format = roles
+
+# Modules of exceptions that are permitted to be recreated upon
+# receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules = glance.common.exception,builtins,exceptions
+
+# Address to bind the server.  Useful when selecting a particular
+# network interface. (string value)
+bind_host = 0.0.0.0
+
+# The port on which the server will listen. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+bind_port = 9292
+
+# The number of child process workers that will be created to service
+# requests. The default will be equal to the number of CPUs available.
+# (integer value)
+workers = 1
+
+# Maximum line size of message headers to be accepted. max_header_line
+# may need to be increased when using large tokens (typically those
+# generated by the Keystone v3 API with big service catalogs (integer
+# value)
+#max_header_line = 16384
+
+# If False, server will return the header "Connection: close", If
+# True, server will return "Connection: Keep-Alive" in its responses.
+# In order to close the client socket connection explicitly after the
+# response is sent and read successfully by the client, you simply
+# have to set this option to False when you create a wsgi server.
+# (boolean value)
+#http_keepalive = true
+
+# Timeout for client connections' socket operations. If an incoming
+# connection is idle for this number of seconds it will be closed. A
+# value of '0' means wait forever. (integer value)
+#client_socket_timeout = 900
+
+# The backlog value that will be used when creating the TCP listener
+# socket. (integer value)
+#backlog = 4096
+
+# The value for the socket option TCP_KEEPIDLE.  This is the time in
+# seconds that the connection must be idle before TCP starts sending
+# keepalive probes. (integer value)
+#tcp_keepidle = 600
+
+# CA certificate file to use to verify connecting clients. (string
+# value)
+#ca_file = <None>
+
+# Certificate file to use when starting API server securely. (string
+# value)
+#cert_file = <None>
+
+# Private key file to use when starting API server securely. (string
+# value)
+#key_file = <None>
+
+# The path to the sqlite file database that will be used for image
+# cache management. (string value)
+#image_cache_sqlite_db = cache.db
+
+# The driver to use for image cache management. (string value)
+#image_cache_driver = sqlite
+
+# The upper limit (the maximum size of accumulated cache in bytes)
+# beyond which the cache pruner, if running, starts cleaning the image
+# cache. (integer value)
+#image_cache_max_size = 10737418240
+
+# The amount of time to let an incomplete image remain in the cache,
+# before the cache cleaner, if running, will remove the incomplete
+# image. (integer value)
+#image_cache_stall_time = 86400
+
+# Base directory that the image cache uses. (string value)
+image_cache_dir = {RIFT_VAR_ROOT}/glance/image-cache/
+
+# Default publisher_id for outgoing notifications. (string value)
+#default_publisher_id = image.localhost
+
+# List of disabled notifications. A notification can be given either
+# as a notification type to disable a single event, or as a
+# notification group prefix to disable all events within a group.
+# Example: if this config option is set to ["image.create",
+# "metadef_namespace"], then "image.create" notification will not be
+# sent after image is created and none of the notifications for
+# metadefinition namespaces will be sent. (list value)
+#disabled_notifications =
+
+# Address to find the registry server. (string value)
+registry_host = 0.0.0.0
+
+# Port the registry server is listening on. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+registry_port = 9191
+
+# Whether to pass through the user token when making requests to the
+# registry. To prevent failures with token expiration during big files
+# upload, it is recommended to set this parameter to False. If
+# "use_user_token" is not in effect, then admin credentials can be
+# specified. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#use_user_token = true
+
+# The administrators user name. If "use_user_token" is not in effect,
+# then admin credentials can be specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#admin_user = <None>
+
+# The administrators password. If "use_user_token" is not in effect,
+# then admin credentials can be specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#admin_password = <None>
+
+# The tenant name of the administrative user. If "use_user_token" is
+# not in effect, then admin tenant name can be specified. (string
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#admin_tenant_name = <None>
+
+# The URL to the keystone service. If "use_user_token" is not in
+# effect and using keystone auth, then URL of keystone can be
+# specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#auth_url = <None>
+
+# The strategy to use for authentication. If "use_user_token" is not
+# in effect, then auth strategy can be specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#auth_strategy = noauth
+
+# The region for the authentication service. If "use_user_token" is
+# not in effect and using keystone auth, then region name can be
+# specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#auth_region = <None>
+
+# The protocol to use for communication with the registry server.
+# Either http or https. (string value)
+#registry_client_protocol = http
+
+# The path to the key file to use in SSL connections to the registry
+# server, if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE
+# environment variable to a filepath of the key file (string value)
+#registry_client_key_file = <None>
+
+# The path to the cert file to use in SSL connections to the registry
+# server, if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE
+# environment variable to a filepath of the CA cert file (string
+# value)
+#registry_client_cert_file = <None>
+
+# The path to the certifying authority cert file to use in SSL
+# connections to the registry server, if any. Alternately, you may set
+# the GLANCE_CLIENT_CA_FILE environment variable to a filepath of the
+# CA cert file. (string value)
+#registry_client_ca_file = <None>
+
+# When using SSL in connections to the registry server, do not require
+# validation via a certifying authority. This is the registry's
+# equivalent of specifying --insecure on the command line using
+# glanceclient for the API. (boolean value)
+#registry_client_insecure = false
+
+# The period of time, in seconds, that the API server will wait for a
+# registry request to complete. A value of 0 implies no timeout.
+# (integer value)
+#registry_client_timeout = 600
+
+# Whether to pass through headers containing user and tenant
+# information when making requests to the registry. This allows the
+# registry to use the context middleware without keystonemiddleware's
+# auth_token middleware, removing calls to the keystone auth service.
+# It is recommended that when using this option, secure communication
+# between glance api and glance registry is ensured by means other
+# than auth_token middleware. (boolean value)
+#send_identity_headers = false
+
+# The amount of time in seconds to delay before performing a delete.
+# (integer value)
+#scrub_time = 0
+
+# The size of thread pool to be used for scrubbing images. The default
+# is one, which signifies serial scrubbing. Any value above one
+# indicates the max number of images that may be scrubbed in parallel.
+# (integer value)
+#scrub_pool_size = 1
+
+# Turn on/off delayed delete. (boolean value)
+#delayed_delete = false
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of
+# the default INFO level. (boolean value)
+debug = True
+
+# If set to false, the logging level will be set to WARNING instead of
+# the default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+verbose = True
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# Note that when logging configuration files are used then all logging
+# configuration is set in the configuration file and other logging
+# configuration options are ignored (for example,
+# logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default
+# is set, logging will go to stderr as defined by use_stderr. This
+# option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+log_file = {RIFT_VAR_ROOT}/log/glance/glance-api.log
+
+# (Optional) The base directory used for relative log_file  paths.
+# This option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is
+# moved or removed this handler will open a new log file with
+# specified path instantaneously. It makes sense only if log_file
+# option is specified and Linux platform is used. This option is
+# ignored if log_config_append is set. (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and
+# will be changed later to honor RFC5424. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined.
+# (string value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the
+# message is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is
+# ignored if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve to this
+# address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Allowed values: redis, dummy
+#rpc_zmq_matchmaker = redis
+
+# Type of concurrency used. Either "native" or "eventlet" (string
+# value)
+#rpc_zmq_concurrency = eventlet
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic.
+# Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address.
+# Must match "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Seconds to wait before a cast expires (TTL). The default value of -1
+# specifies an infinite linger period. The value of 0 specifies no
+# linger period. Pending messages shall be discarded immediately when
+# the socket is closed. Only supported by impl_zmq. (integer value)
+#rpc_cast_timeout = -1
+
+# The default number of seconds that poll should wait. Poll raises
+# timeout exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about
+# existing target ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 120
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy.
+# (boolean value)
+#use_pub_sub = true
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49152
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with
+# ZMQBindError. (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Size of executor thread pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend option
+# and driver specific configuration. (string value)
+#transport_url = <None>
+
+# The messaging driver to use, defaults to rabbit. Other drivers
+# include amqp and zmq. (string value)
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the transport_url
+# option. (string value)
+#control_exchange = openstack
+
+
+[cors]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain
+# received in the requests "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials
+# (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to
+# HTTP Simple Headers. (list value)
+#expose_headers = X-Image-Meta-Checksum,X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list
+# value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual
+# request. (list value)
+#allow_headers = Content-MD5,X-Image-Meta-Checksum,X-Storage-Token,Accept-Encoding,X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
+
+
+[cors.subdomain]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain
+# received in the requests "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials
+# (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to
+# HTTP Simple Headers. (list value)
+#expose_headers = X-Image-Meta-Checksum,X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list
+# value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual
+# request. (list value)
+#allow_headers = Content-MD5,X-Image-Meta-Checksum,X-Storage-Token,Accept-Encoding,X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+sqlite_db = {RIFT_VAR_ROOT}/glance/glance-api.db
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database.
+# (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+
+# The SQLAlchemy connection string to use to connect to the slave
+# database. (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including
+# the default, overrides any server-set SQL mode. To use whatever SQL
+# mode is set by the server configuration, set this to no value.
+# Example: mysql_sql_mode= (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to
+# -1 to specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection
+# lost. (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database
+# operation up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries
+# of a database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before
+# error is raised. Set to -1 to specify an infinite retry count.
+# (integer value)
+#db_max_retries = 20
+
+#
+# From oslo.db.concurrency
+#
+
+# Enable the experimental use of thread pooling for all DB API calls
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
+#use_tpool = false
+
+
+[glance_store]
+
+#
+# From glance.store
+#
+
+# List of stores enabled. Valid stores are: cinder, file, http, rbd,
+# sheepdog, swift, s3, vsphere (list value)
+stores = file,http
+
+# Default scheme to use to store image data. The scheme must be
+# registered by one of the stores defined by the 'stores' config
+# option. (string value)
+default_store = file
+
+# Minimum interval in seconds to execute updating dynamic storage
+# capabilities based on backend status. It's not a periodic routine;
+# the update logic will be executed only when the interval seconds
+# have elapsed and a store operation has been triggered. The feature
+# will be enabled only when the option value is greater than zero.
+# (integer value)
+#store_capabilities_update_min_interval = 0
+
+# Specify the path to the CA bundle file to use in verifying the
+# remote server certificate. (string value)
+#https_ca_certificates_file = <None>
+
+# If true, the remote server certificate is not verified. If false,
+# then the default CA truststore is used for verification. This option
+# is ignored if "https_ca_certificates_file" is set. (boolean value)
+#https_insecure = true
+
+# Specify the http/https proxy information that should be used to
+# connect to the remote server. The proxy information should be a key
+# value pair of the scheme and proxy, e.g. http:10.0.0.1:3128. You can
+# specify proxies for multiple schemes by separating the key value
+# pairs with a comma, e.g. http:10.0.0.1:3128, https:10.0.0.1:1080.
+# (dict value)
+#http_proxy_information =
+
+# If True, swiftclient won't check for a valid SSL certificate when
+# authenticating. (boolean value)
+#swift_store_auth_insecure = false
+
+# A string giving the CA certificate file to use in SSL connections
+# for verifying certs. (string value)
+#swift_store_cacert = <None>
+
+# The region of the swift endpoint to be used for single tenant. This
+# setting is only necessary if the tenant has multiple swift
+# endpoints. (string value)
+#swift_store_region = <None>
+
+# If set, the configured endpoint will be used. If None, the storage
+# url from the auth response will be used. (string value)
+#swift_store_endpoint = <None>
+
+# A string giving the endpoint type of the swift service to use
+# (publicURL, adminURL or internalURL). This setting is only used if
+# swift_store_auth_version is 2. (string value)
+#swift_store_endpoint_type = publicURL
+
+# A string giving the service type of the swift service to use. This
+# setting is only used if swift_store_auth_version is 2. (string
+# value)
+#swift_store_service_type = object-store
+
+# Container within the account that the account should use for storing
+# images in Swift when using single container mode. In multiple
+# container mode, this will be the prefix for all containers. (string
+# value)
+#swift_store_container = glance
+
+# The size, in MB, that Glance will start chunking image files and do
+# a large object manifest in Swift. (integer value)
+#swift_store_large_object_size = 5120
+
+# The amount of data written to a temporary disk buffer during the
+# process of chunking the image file. (integer value)
+#swift_store_large_object_chunk_size = 200
+
+# A boolean value that determines if we create the container if it
+# does not exist. (boolean value)
+#swift_store_create_container_on_put = false
+
+# If set to True, enables multi-tenant storage mode which causes
+# Glance images to be stored in tenant specific Swift accounts.
+# (boolean value)
+#swift_store_multi_tenant = false
+
+# When set to 0, a single-tenant store will only use one container to
+# store all images. When set to an integer value between 1 and 32, a
+# single-tenant store will use multiple containers to store images,
+# and this value will determine how many containers are created. Used
+# only when swift_store_multi_tenant is disabled. The total number of
+# containers that will be used is equal to 16^N, so if this config
+# option is set to 2, then 16^2=256 containers will be used to store
+# images. (integer value)
+#swift_store_multiple_containers_seed = 0
+
+# A list of tenants that will be granted read/write access on all
+# Swift containers created by Glance in multi-tenant mode. (list
+# value)
+#swift_store_admin_tenants =
+
+# If set to False, disables SSL layer compression of https swift
+# requests. Setting to False may improve performance for images which
+# are already in a compressed format, eg qcow2. (boolean value)
+#swift_store_ssl_compression = true
+
+# The number of times a Swift download will be retried before the
+# request fails. (integer value)
+#swift_store_retry_get_count = 0
+
+# The period of time (in seconds) before token expiration when
+# glance_store will try to request a new user token. The default value
+# of 60 sec means that if the token is going to expire in 1 min then
+# glance_store requests a new user token. (integer value)
+#swift_store_expire_soon_interval = 60
+
+# If set to True create a trust for each add/get request to Multi-
+# tenant store in order to prevent authentication token to be expired
+# during uploading/downloading data. If set to False then user token
+# is used for Swift connection (so no overhead on trust creation).
+# Please note that this option is considered only and only if
+# swift_store_multi_tenant=True (boolean value)
+#swift_store_use_trusts = true
+
+# The reference to the default swift account/backing store parameters
+# to use for adding new images. (string value)
+#default_swift_reference = ref1
+
+# Version of the authentication service to use. Valid versions are 2
+# and 3 for keystone and 1 (deprecated) for swauth and rackspace.
+# (deprecated - use "auth_version" in swift_store_config_file) (string
+# value)
+#swift_store_auth_version = 2
+
+# The address where the Swift authentication service is listening.
+# (deprecated - use "auth_address" in swift_store_config_file) (string
+# value)
+#swift_store_auth_address = <None>
+
+# The user to authenticate against the Swift authentication service
+# (deprecated - use "user" in swift_store_config_file) (string value)
+#swift_store_user = <None>
+
+# Auth key for the user authenticating against the Swift
+# authentication service. (deprecated - use "key" in
+# swift_store_config_file) (string value)
+#swift_store_key = <None>
+
+# The config file that has the swift account(s) configs. (string value)
+#swift_store_config_file = <None>
+
+# RADOS images will be chunked into objects of this size (in
+# megabytes). For best performance, this should be a power of two.
+# (integer value)
+#rbd_store_chunk_size = 8
+
+# RADOS pool in which images are stored. (string value)
+#rbd_store_pool = images
+
+# RADOS user to authenticate as (only applicable if using Cephx. If
+# <None>, a default will be chosen based on the client. section in
+# rbd_store_ceph_conf) (string value)
+#rbd_store_user = <None>
+
+# Ceph configuration file path. If <None>, librados will locate the
+# default config. If using cephx authentication, this file should
+# include a reference to the right keyring in a client.<USER> section
+# (string value)
+#rbd_store_ceph_conf = /etc/ceph/ceph.conf
+
+# Timeout value (in seconds) used when connecting to ceph cluster. If
+# value <= 0, no timeout is set and default librados value is used.
+# (integer value)
+#rados_connect_timeout = 0
+
+# Info to match when looking for cinder in the service catalog. Format
+# is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volumev2::publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v2/%(tenant)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node. If specified, it will be used to locate
+# OpenStack services for stores. (string value)
+# Deprecated group/name - [DEFAULT]/os_region_name
+#cinder_os_region_name = <None>
+
+# Location of CA certificates file to use for cinder client requests.
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Period of time in seconds to wait for a cinder volume transition to
+# complete. (integer value)
+#cinder_state_transition_timeout = 300
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = false
+
+# The address where the Cinder authentication service is listening. If
+# <None>, the cinder endpoint in the service catalog is used. (string
+# value)
+#cinder_store_auth_address = <None>
+
+# User name to authenticate against Cinder. If <None>, the user of
+# current context is used. (string value)
+#cinder_store_user_name = <None>
+
+# Password for the user authenticating against Cinder. If <None>, the
+# current context auth token is used. (string value)
+#cinder_store_password = <None>
+
+# Project name where the image is stored in Cinder. If <None>, the
+# project in current context is used. (string value)
+#cinder_store_project_name = <None>
+
+# Path to the rootwrap configuration file to use for running commands
+# as root. (string value)
+#rootwrap_config = /etc/glance/rootwrap.conf
+
+# The host where the S3 server is listening. (string value)
+#s3_store_host = <None>
+
+# The S3 query token access key. (string value)
+#s3_store_access_key = <None>
+
+# The S3 query token secret key. (string value)
+#s3_store_secret_key = <None>
+
+# The S3 bucket to be used to store the Glance data. (string value)
+#s3_store_bucket = <None>
+
+# The local directory where uploads will be staged before they are
+# transferred into S3. (string value)
+#s3_store_object_buffer_dir = <None>
+
+# A boolean to determine if the S3 bucket should be created on upload
+# if it does not exist or if an error should be returned to the user.
+# (boolean value)
+#s3_store_create_bucket_on_put = false
+
+# The S3 calling format used to determine the bucket. Either subdomain
+# or path can be used. (string value)
+#s3_store_bucket_url_format = subdomain
+
+# What size, in MB, should S3 start chunking image files and do a
+# multipart upload in S3. (integer value)
+#s3_store_large_object_size = 100
+
+# What multipart upload part size, in MB, should S3 use when uploading
+# parts. The size must be greater than or equal to 5M. (integer value)
+#s3_store_large_object_chunk_size = 10
+
+# The number of thread pools to perform a multipart upload in S3.
+# (integer value)
+#s3_store_thread_pools = 10
+
+# Enable the use of a proxy. (boolean value)
+#s3_store_enable_proxy = false
+
+# Address or hostname for the proxy server. (string value)
+#s3_store_proxy_host = <None>
+
+# The port to use when connecting over a proxy. (integer value)
+#s3_store_proxy_port = 8080
+
+# The username to connect to the proxy. (string value)
+#s3_store_proxy_user = <None>
+
+# The password to use when connecting over a proxy. (string value)
+#s3_store_proxy_password = <None>
+
+# Images will be chunked into objects of this size (in megabytes). For
+# best performance, this should be a power of two. (integer value)
+#sheepdog_store_chunk_size = 64
+
+# Port of sheep daemon. (integer value)
+#sheepdog_store_port = 7000
+
+# IP address of sheep daemon. (string value)
+#sheepdog_store_address = localhost
+
+# Directory to which the Filesystem backend store writes images.
+# (string value)
+filesystem_store_datadir = {RIFT_VAR_ROOT}/glance/images/
+
+# List of directories and its priorities to which the Filesystem
+# backend store writes images. (multi valued)
+#filesystem_store_datadirs =
+
+# The path to a file which contains the metadata to be returned with
+# any location associated with this store.  The file must contain a
+# valid JSON object. The object should contain the keys 'id' and
+# 'mountpoint'. The value for both keys should be 'string'. (string
+# value)
+#filesystem_store_metadata_file = <None>
+
+# The required permission for the created image file. In this way the
+# user of another service, e.g. Nova, that consumes the image could be
+# the exclusive member of the group that owns the files created.
+# Assigning a value less than or equal to zero means don't change the
+# default permission of the file. This value will be decoded as an
+# octal digit. (integer value)
+#filesystem_store_file_perm = 0
+
+# ESX/ESXi or vCenter Server target system. The server value can be an
+# IP address or a DNS name. (string value)
+#vmware_server_host = <None>
+
+# Username for authenticating with VMware ESX/VC server. (string
+# value)
+#vmware_server_username = <None>
+
+# Password for authenticating with VMware ESX/VC server. (string
+# value)
+#vmware_server_password = <None>
+
+# Number of times VMware ESX/VC server API must be retried upon
+# connection related issues. (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks invoked on VMware ESX/VC
+# server. (integer value)
+#vmware_task_poll_interval = 5
+
+# The name of the directory where the glance images will be stored in
+# the VMware datastore. (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# If true, the ESX/vCenter server certificate is not verified. If
+# false, then the default CA truststore is used for verification. This
+# option is ignored if "vmware_ca_file" is set. (boolean value)
+# Deprecated group/name - [DEFAULT]/vmware_api_insecure
+#vmware_insecure = false
+
+# Specify a CA bundle file to use in verifying the ESX/vCenter server
+# certificate. (string value)
+#vmware_ca_file = <None>
+
+# A list of datastores where the image can be stored. This option may
+# be specified multiple times for specifying multiple datastores. The
+# datastore name should be specified after its datacenter path,
+# separated by ":". An optional weight may be given after the
+# datastore name, separated again by ":". Thus, the required format
+# becomes <datacenter_path>:<datastore_name>:<optional_weight>. When
+# adding an image, the datastore with highest weight will be selected,
+# unless there is not enough free space available in cases where the
+# image size is already known. If no weight is given, it is assumed to
+# be zero and the directory will be considered for selection last. If
+# multiple datastores have the same weight, then the one with the most
+# free space available is selected. (multi valued)
+#vmware_datastores =
+
+
+[image_format]
+
+#
+# From glance.api
+#
+
+# Supported values for the 'container_format' image attribute (list
+# value)
+# Deprecated group/name - [DEFAULT]/container_formats
+container_formats = ami,ari,aki,bare,ovf,ova,docker
+
+# Supported values for the 'disk_format' image attribute (list value)
+# Deprecated group/name - [DEFAULT]/disk_formats
+disk_formats = ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso
+
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete public Identity API endpoint. (string value)
+#auth_uri = <None>
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version = <None>
+
+# Do not handle authorization requests within the middleware, but
+# delegate the authorization decision to downstream WSGI components.
+# (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server.
+# (integer value)
+#http_connect_timeout = <None>
+
+# How many times are we trying to reconnect when communicating with
+# Identity API Server. (integer value)
+#http_request_max_retries = 3
+
+# Env key for the swift cache. (string value)
+#cache = <None>
+
+# Required if identity server requires client certificate (string
+# value)
+#certfile = <None>
+
+# Required if identity server requires client certificate (string
+# value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. Defaults to system CAs. (string value)
+#cafile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# The region in which the identity server can be found. (string value)
+#region_name = <None>
+
+# Directory used to cache files related to PKI tokens. (string value)
+#signing_dir = <None>
+
+# Optionally specify a list of memcached server(s) to use for caching.
+# If left undefined, tokens will instead be cached in-process. (list
+# value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers = <None>
+
+# In order to prevent excessive effort spent validating tokens, the
+# middleware caches previously-seen tokens for a configurable duration
+# (in seconds). Set to -1 to disable caching completely. (integer
+# value)
+#token_cache_time = 300
+
+# Determines the frequency at which the list of revoked tokens is
+# retrieved from the Identity service (in seconds). A high number of
+# revocation events combined with a low cache duration may
+# significantly reduce performance. (integer value)
+#revocation_cache_time = 10
+
+# (Optional) If defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token data is
+# encrypted and authenticated in the cache. If the value is not one of
+# these options or empty, auth_token will raise an exception on
+# initialization. (string value)
+# Allowed values: None, MAC, ENCRYPT
+#memcache_security_strategy = None
+
+# (Optional, mandatory if memcache_security_strategy is defined) This
+# string is used for key derivation. (string value)
+#memcache_secret_key = <None>
+
+# (Optional) Number of seconds memcached server is considered dead
+# before it is tried again. (integer value)
+#memcache_pool_dead_retry = 300
+
+# (Optional) Maximum total number of open connections to every
+# memcached server. (integer value)
+#memcache_pool_maxsize = 10
+
+# (Optional) Socket timeout in seconds for communicating with a
+# memcached server. (integer value)
+#memcache_pool_socket_timeout = 3
+
+# (Optional) Number of seconds a connection to memcached is held
+# unused in the pool before it is closed. (integer value)
+#memcache_pool_unused_timeout = 60
+
+# (Optional) Number of seconds that an operation will wait to get a
+# memcached client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout = 10
+
+# (Optional) Use the advanced (eventlet safe) memcached client pool.
+# The advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool = false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If
+# False, middleware will not ask for service catalog on token
+# validation and will not set the X-Service-Catalog header. (boolean
+# value)
+#include_service_catalog = true
+
+# Used to control the use and type of token binding. Can be set to:
+# "disabled" to not check token binding. "permissive" (default) to
+# validate binding information if the bind type is of a form known to
+# the server and ignore it if not. "strict" like "permissive" but if
+# the bind type is unknown the token will be rejected. "required" any
+# form of token binding is needed to be allowed. Finally the name of a
+# binding method that must be present in tokens. (string value)
+#enforce_token_bind = permissive
+
+# If true, the revocation list will be checked for cached tokens. This
+# requires that PKI tokens are configured on the identity server.
+# (boolean value)
+#check_revocations_for_cached = false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a single
+# algorithm or multiple. The algorithms are those supported by Python
+# standard hashlib.new(). The hashes will be tried in the order given,
+# so put the preferred one first for performance. The result of the
+# first hash will be stored in the cache. This will typically be set
+# to multiple values only while migrating from a less secure algorithm
+# to a more secure one. Once all the old tokens are expired this
+# option should be set to a single value for better performance. (list
+# value)
+#hash_algorithms = md5
+
+# Authentication type to load (unknown value)
+# Deprecated group/name - [DEFAULT]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (unknown
+# value)
+#auth_section = <None>
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host = 127.0.0.1
+
+# Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = 6379
+
+# Password for Redis server (optional). (string value)
+#password =
+
+# List of Redis Sentinel hosts (fault tolerance mode) e.g.
+# [host:port, host1:port ... ] (list value)
+#sentinel_hosts =
+
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
+
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 500
+
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations (integer value)
+#socket_timeout = 1000
+
+
+[oslo_concurrency]
+
+#
+# From oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+# Deprecated group/name - [DEFAULT]/disable_process_locking
+#disable_process_locking = false
+
+# Directory to use for lock files.  For security, the specified
+# directory should only be writable by the user running the processes
+# that need locking. Defaults to environment variable OSLO_LOCK_PATH.
+# If external locks are used, a lock path must be set. (string value)
+# Deprecated group/name - [DEFAULT]/lock_path
+#lock_path = <None>
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# address prefix used when sending to a specific server (string value)
+# Deprecated group/name - [amqp1]/server_request_prefix
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+# Deprecated group/name - [amqp1]/broadcast_prefix
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+# Deprecated group/name - [amqp1]/group_request_prefix
+#group_request_prefix = unicast
+
+# Name for the AMQP container (string value)
+# Deprecated group/name - [amqp1]/container_name
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+# Deprecated group/name - [amqp1]/idle_timeout
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+# Deprecated group/name - [amqp1]/trace
+#trace = false
+
+# CA certificate PEM file to verify server certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_ca_file
+#ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients (string
+# value)
+# Deprecated group/name - [amqp1]/ssl_cert_file
+#ssl_cert_file =
+
+# Private key PEM file used to sign cert_file certificate (string
+# value)
+# Deprecated group/name - [amqp1]/ssl_key_file
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+# Deprecated group/name - [amqp1]/ssl_key_password
+#ssl_key_password = <None>
+
+# Accept clients using either SSL or plain TCP (boolean value)
+# Deprecated group/name - [amqp1]/allow_insecure_clients
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+# Deprecated group/name - [amqp1]/sasl_mechanisms
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string
+# value)
+# Deprecated group/name - [amqp1]/sasl_config_dir
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+# Deprecated group/name - [amqp1]/sasl_config_name
+#sasl_config_name =
+
+# User name for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/username
+#username =
+
+# Password for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/password
+#password =
+
+
+[oslo_messaging_notifications]
+
+#
+# From oslo.messaging
+#
+
+# The Drivers(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If
+# not set, we fall back to the same configuration used for RPC.
+# (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete = false
+
+# SSL version to use (valid only if SSL enabled). Valid values are
+# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be
+# available on some distributions. (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_version
+#kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
+#kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
+#kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled).
+# (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
+#kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer
+# cancel notification. (floating point value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set, compression
+# will not be used. This option may not be available in future
+# versions. (string value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before abandoning sending it
+# its replies. This value should not be longer than
+# rpc_response_timeout. (integer value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we
+# are currently connected to becomes unavailable. Takes effect only if
+# more than one RabbitMQ node is provided in config. (string value)
+# Allowed values: round-robin, shuffle
+#kombu_failover_strategy = round-robin
+
+# The RabbitMQ broker address where a single node is used. (string
+# value)
+# Deprecated group/name - [DEFAULT]/rabbit_host
+#rabbit_host = localhost
+
+# The RabbitMQ broker port where a single node is used. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/rabbit_port
+#rabbit_port = 5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/rabbit_hosts
+#rabbit_hosts = $rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
+#rabbit_use_ssl = false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_userid
+#rabbit_userid = guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_password
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_login_method
+#rabbit_login_method = AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
+#rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to backoff for between retries when connecting to RabbitMQ.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30
+# seconds. (integer value)
+#rabbit_interval_max = 30
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. In RabbitMQ 3.0,
+# queue mirroring is no longer controlled by the x-ha-policy argument
+# when declaring a queue. If you just want to make sure that all
+# queues (except  those with auto-generated names) are mirrored across
+# all nodes, run: "rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha-
+# mode": "all"}' " (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
+
+# Positive integer representing duration in seconds for queue TTL
+# (x-expires). Queues which are unused for the duration of the TTL are
+# automatically deleted. The parameter affects only reply and fanout
+# queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 600
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down
+# if heartbeat's keep-alive fails (0 disable the heartbeat).
+# EXPERIMENTAL (integer value)
+#heartbeat_timeout_threshold = 60
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+#fake_rabbit = false
+
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer
+# value)
+#heartbeat_interval = 1
+
+# Enable SSL (boolean value)
+#ssl = <None>
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating
+# point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating
+# point value)
+#tcp_user_timeout = 0.25
+
+# Set delay for reconnection to some host which has connection error
+# (floating point value)
+#host_connection_reconnect_delay = 0.25
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 10
+
+# Maximum number of connections to create above `pool_max_size`.
+# (integer value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connection to become
+# available. (integer value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer
+# value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are
+# considered stale in seconds or None for no staleness. Stale
+# connections are closed on acquire. (integer value)
+#pool_stale = 60
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during
+# sending notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer
+# value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# rpc listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# rpc reply listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during
+# sending reply. -1 means infinite retry during rpc_timeout (integer
+# value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of connectivity problem during
+# sending RPC message, -1 means infinite retry. If actual retry
+# attempts is not 0 the rpc request could be processed more than one
+# time (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending RPC message (floating point value)
+#rpc_retry_delay = 0.25
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string
+# value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be
+# relative to any directory in the search path defined by the
+# config_dir option, or absolute paths. The file defined by
+# policy_file must exist for these directories to be searched.
+# Missing or empty directories are ignored. (multi valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
+
+
+[paste_deploy]
+
+#
+# From glance.api
+#
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-api-keystone] use the value "keystone" (string
+# value)
+flavor =
+
+# Name of the paste configuration file. (string value)
+config_file = {RIFT_INSTALL}/etc/glance/glance-api-paste.ini
+
+
+[profiler]
+
+#
+# From glance.api
+#
+
+# If False fully disable profiling feature. (boolean value)
+#enabled = false
+
+# If False doesn't trace SQL requests. (boolean value)
+#trace_sqlalchemy = false
+
+# Secret key to use to sign Glance API and Glance Registry services
+# tracing messages. (string value)
+#hmac_keys = SECRET_KEY
+
+
+[store_type_location_strategy]
+
+#
+# From glance.api
+#
+
+# The store names to use to get store preference order. The name must
+# be registered by one of the stores defined by the 'stores' config
+# option. This option will be applied when you using 'store_type'
+# option as image location strategy defined by the 'location_strategy'
+# config option. (list value)
+#store_type_preference =
+
+
+[task]
+
+#
+# From glance.api
+#
+
+# Time in hours for which a task lives after either succeeding or
+# failing (integer value)
+# Deprecated group/name - [DEFAULT]/task_time_to_live
+#task_time_to_live = 48
+
+# Specifies which task executor to be used to run the task scripts.
+# (string value)
+#task_executor = taskflow
+
+# Work dir for asynchronous task operations. The directory set here
+# will be used to operate over images - normally before they are
+# imported in the destination store. When providing work dir, make
+# sure enough space is provided for concurrent tasks to run
+# efficiently without running out of space. A rough estimation can be
+# done by multiplying the number of `max_workers` - or the N of
+# workers running - by an average image size (e.g 500MB). The image
+# size estimation should be done based on the average size in your
+# deployment. Note that depending on the tasks running you may need to
+# multiply this number by some factor depending on what the task does.
+# For example, you may want to double the available size if image
+# conversion is enabled. All this being said, remember these are just
+# estimations and you should do them based on the worst case scenario
+# and be prepared to act in case they were wrong. (string value)
+#work_dir = <None>
+
+
+[taskflow_executor]
+
+#
+# From glance.api
+#
+
+# The mode in which the engine will run. Can be 'serial' or
+# 'parallel'. (string value)
+# Allowed values: serial, parallel
+#engine_mode = parallel
+
+# The number of parallel activities executed at the same time by the
+# engine. The value can be greater than one when the engine mode is
+# 'parallel'. (integer value)
+# Deprecated group/name - [task]/eventlet_executor_pool_size
+#max_workers = 10
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-cache.conf b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-cache.conf
new file mode 100644
index 0000000..bc7337c
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-cache.conf
@@ -0,0 +1,338 @@
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+[DEFAULT]
+
+#
+# From glance.cache
+#
+
+# Whether to allow users to specify image properties beyond what the
+# image schema provides (boolean value)
+#allow_additional_image_properties = true
+
+# Maximum number of image members per image. Negative values evaluate
+# to unlimited. (integer value)
+#image_member_quota = 128
+
+# Maximum number of properties allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_property_quota = 128
+
+# Maximum number of tags allowed on an image. Negative values evaluate
+# to unlimited. (integer value)
+#image_tag_quota = 128
+
+# Maximum number of locations allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_location_quota = 10
+
+# Python module path of data access API (string value)
+#data_api = glance.db.sqlalchemy.api
+
+# Default value for the number of items returned by a request if not
+# specified explicitly in the request (integer value)
+#limit_param_default = 25
+
+# Maximum permissible number of items that could be returned by a
+# request (integer value)
+#api_limit_max = 1000
+
+# Whether to include the backend image storage location in image
+# properties. Revealing storage location can be a security risk, so
+# use this setting with caution! (boolean value)
+#show_image_direct_url = false
+
+# Whether to include the backend image locations in image properties.
+# For example, if using the file system store a URL of
+# "file:///path/to/image" will be returned to the user in the
+# 'direct_url' meta-data field. Revealing storage location can be a
+# security risk, so use this setting with caution! Setting this to
+# true overrides the show_image_direct_url option. (boolean value)
+#show_multiple_locations = false
+
+# Maximum size of image a user can upload in bytes. Defaults to
+# 1099511627776 bytes (1 TB). WARNING: this value should only be
+# increased after careful consideration and must be set to a value
+# under 8 EB (9223372036854775808). (integer value)
+# Maximum value: 9223372036854775808
+#image_size_cap = 1099511627776
+
+# Set a system wide quota for every user. This value is the total
+# capacity that a user can use across all storage systems. A value of
+# 0 means unlimited. Optional unit can be specified for the value.
+# Accepted units are B, KB, MB, GB and TB representing Bytes,
+# KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no
+# unit is specified then Bytes is assumed. Note that there should not
+# be any space between value and unit and units are case sensitive.
+# (string value)
+#user_storage_quota = 0
+
+# Deploy the v1 OpenStack Images API. (boolean value)
+#enable_v1_api = true
+
+# Deploy the v2 OpenStack Images API. (boolean value)
+#enable_v2_api = true
+
+# Deploy the v1 OpenStack Registry API. (boolean value)
+#enable_v1_registry = true
+
+# Deploy the v2 OpenStack Registry API. (boolean value)
+#enable_v2_registry = true
+
+# The hostname/IP of the pydev process listening for debug connections
+# (string value)
+#pydev_worker_debug_host = <None>
+
+# The port on which a pydev process is listening for connections.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#pydev_worker_debug_port = 5678
+
+# AES key for encrypting store 'location' metadata. This includes, if
+# used, Swift or S3 credentials. Should be set to a random string of
+# length 16, 24 or 32 bytes (string value)
+#metadata_encryption_key = <None>
+
+# Digest algorithm which will be used for digital signature. Use the
+# command "openssl list-message-digest-algorithms" to get the
+# available algorithms supported by the version of OpenSSL on the
+# platform. Examples are "sha1", "sha256", "sha512", etc. (string
+# value)
+#digest_algorithm = sha256
+
+# The path to the sqlite file database that will be used for image
+# cache management. (string value)
+#image_cache_sqlite_db = cache.db
+
+# The driver to use for image cache management. (string value)
+#image_cache_driver = sqlite
+
+# The upper limit (the maximum size of accumulated cache in bytes)
+# beyond which the cache pruner, if running, starts cleaning the image
+# cache. (integer value)
+#image_cache_max_size = 10737418240
+
+# The amount of time to let an incomplete image remain in the cache,
+# before the cache cleaner, if running, will remove the incomplete
+# image. (integer value)
+#image_cache_stall_time = 86400
+
+# Base directory that the image cache uses. (string value)
+image_cache_dir = {RIFT_VAR_ROOT}/glance/image-cache/
+
+# Address to find the registry server. (string value)
+registry_host = 127.0.0.1
+
+# Port the registry server is listening on. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+registry_port = 9191
+
+# Whether to pass through the user token when making requests to the
+# registry. To prevent failures with token expiration during big files
+# upload, it is recommended to set this parameter to False. If
+# "use_user_token" is not in effect, then admin credentials can be
+# specified. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#use_user_token = true
+
+# The administrators user name. If "use_user_token" is not in effect,
+# then admin credentials can be specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#admin_user = <None>
+
+# The administrators password. If "use_user_token" is not in effect,
+# then admin credentials can be specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#admin_password = <None>
+
+# The tenant name of the administrative user. If "use_user_token" is
+# not in effect, then admin tenant name can be specified. (string
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#admin_tenant_name = <None>
+
+# The URL to the keystone service. If "use_user_token" is not in
+# effect and using keystone auth, then URL of keystone can be
+# specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#auth_url = <None>
+
+# The strategy to use for authentication. If "use_user_token" is not
+# in effect, then auth strategy can be specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#auth_strategy = noauth
+
+# The region for the authentication service. If "use_user_token" is
+# not in effect and using keystone auth, then region name can be
+# specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#auth_region = <None>
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of
+# the default INFO level. (boolean value)
+debug = false
+
+# If set to false, the logging level will be set to WARNING instead of
+# the default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+verbose = true
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# Note that when logging configuration files are used then all logging
+# configuration is set in the configuration file and other logging
+# configuration options are ignored (for example,
+# logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default
+# is set, logging will go to stderr as defined by use_stderr. This
+# option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+log_file = {RIFT_VAR_ROOT}/log/glance/image-cache.log
+
+# (Optional) The base directory used for relative log_file paths.
+# This option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is
+# moved or removed this handler will open a new log file with
+# specified path instantaneously. It makes sense only if log_file
+# option is specified and Linux platform is used. This option is
+# ignored if log_config_append is set. (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and
+# will be changed later to honor RFC5424. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined.
+# (string value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the
+# message is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is
+# ignored if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string
+# value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be
+# relative to any directory in the search path defined by the
+# config_dir option, or absolute paths. The file defined by
+# policy_file must exist for these directories to be searched.
+# Missing or empty directories are ignored. (multi valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-manage.conf b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-manage.conf
new file mode 100644
index 0000000..4790cf9
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-manage.conf
@@ -0,0 +1,226 @@
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+[DEFAULT]
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of
+# the default INFO level. (boolean value)
+debug = false
+
+# If set to false, the logging level will be set to WARNING instead of
+# the default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+verbose = true
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# Note that when logging configuration files are used then all logging
+# configuration is set in the configuration file and other logging
+# configuration options are ignored (for example,
+# logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default
+# is set, logging will go to stderr as defined by use_stderr. This
+# option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+log_file = {RIFT_VAR_ROOT}/log/glance/image-manage.log
+
+# (Optional) The base directory used for relative log_file paths.
+# This option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is
+# moved or removed this handler will open a new log file with
+# specified path instantaneously. It makes sense only if log_file
+# option is specified and Linux platform is used. This option is
+# ignored if log_config_append is set. (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and
+# will be changed later to honor RFC5424. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined.
+# (string value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the
+# message is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is
+# ignored if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database.
+# (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+
+# The SQLAlchemy connection string to use to connect to the slave
+# database. (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including
+# the default, overrides any server-set SQL mode. To use whatever SQL
+# mode is set by the server configuration, set this to no value.
+# Example: mysql_sql_mode= (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to
+# -1 to specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection
+# lost. (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database
+# operation up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries
+# of a database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before
+# error is raised. Set to -1 to specify an infinite retry count.
+# (integer value)
+#db_max_retries = 20
+
+#
+# From oslo.db.concurrency
+#
+
+# Enable the experimental use of thread pooling for all DB API calls
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
+#use_tpool = false
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-registry-paste.ini b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-registry-paste.ini
new file mode 100644
index 0000000..492dbc6
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-registry-paste.ini
@@ -0,0 +1,35 @@
+# Use this pipeline for no auth - DEFAULT
+[pipeline:glance-registry]
+pipeline = healthcheck osprofiler unauthenticated-context registryapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-registry-keystone]
+pipeline = healthcheck osprofiler authtoken context registryapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-registry-trusted-auth]
+pipeline = healthcheck osprofiler context registryapp
+
+[app:registryapp]
+paste.app_factory = glance.registry.api:API.factory
+
+[filter:healthcheck]
+paste.filter_factory = oslo_middleware:Healthcheck.factory
+backends = disable_by_file
+disable_by_file_path = /etc/glance/healthcheck_disable
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY  #DEPRECATED
+enabled = yes  #DEPRECATED
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-registry.conf b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-registry.conf
new file mode 100644
index 0000000..0fb7ed0
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-registry.conf
@@ -0,0 +1,1431 @@
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+[DEFAULT]
+
+#
+# From glance.registry
+#
+
+# When true, this option sets the owner of an image to be the tenant.
+# Otherwise, the owner of the image will be the authenticated user
+# issuing the request. (boolean value)
+#owner_is_tenant = true
+
+# Role used to identify an authenticated user as administrator.
+# (string value)
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware. (boolean
+# value)
+#allow_anonymous_access = false
+
+# Limits request ID length. (integer value)
+#max_request_id_length = 64
+
+# Whether to allow users to specify image properties beyond what the
+# image schema provides (boolean value)
+#allow_additional_image_properties = true
+
+# Maximum number of image members per image. Negative values evaluate
+# to unlimited. (integer value)
+#image_member_quota = 128
+
+# Maximum number of properties allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_property_quota = 128
+
+# Maximum number of tags allowed on an image. Negative values evaluate
+# to unlimited. (integer value)
+#image_tag_quota = 128
+
+# Maximum number of locations allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_location_quota = 10
+
+# Python module path of data access API (string value)
+data_api = glance.db.sqlalchemy.api
+
+# Default value for the number of items returned by a request if not
+# specified explicitly in the request (integer value)
+#limit_param_default = 25
+
+# Maximum permissible number of items that could be returned by a
+# request (integer value)
+#api_limit_max = 1000
+
+# Whether to include the backend image storage location in image
+# properties. Revealing storage location can be a security risk, so
+# use this setting with caution! (boolean value)
+#show_image_direct_url = false
+
+# Whether to include the backend image locations in image properties.
+# For example, if using the file system store a URL of
+# "file:///path/to/image" will be returned to the user in the
+# 'direct_url' meta-data field. Revealing storage location can be a
+# security risk, so use this setting with caution! Setting this to
+# true overrides the show_image_direct_url option. (boolean value)
+#show_multiple_locations = false
+
+# Maximum size of image a user can upload in bytes. Defaults to
+# 1099511627776 bytes (1 TB). WARNING: this value should only be
+# increased after careful consideration and must be set to a value
+# under 8 EB (9223372036854775808). (integer value)
+# Maximum value: 9223372036854775808
+#image_size_cap = 1099511627776
+
+# Set a system wide quota for every user. This value is the total
+# capacity that a user can use across all storage systems. A value of
+# 0 means unlimited. Optional unit can be specified for the value.
+# Accepted units are B, KB, MB, GB and TB representing Bytes,
+# KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no
+# unit is specified then Bytes is assumed. Note that there should not
+# be any space between value and unit and units are case sensitive.
+# (string value)
+#user_storage_quota = 0
+
+# Deploy the v1 OpenStack Images API. (boolean value)
+#enable_v1_api = true
+
+# Deploy the v2 OpenStack Images API. (boolean value)
+#enable_v2_api = true
+
+# Deploy the v1 OpenStack Registry API. (boolean value)
+#enable_v1_registry = true
+
+# Deploy the v2 OpenStack Registry API. (boolean value)
+#enable_v2_registry = true
+
+# The hostname/IP of the pydev process listening for debug connections
+# (string value)
+#pydev_worker_debug_host = <None>
+
+# The port on which a pydev process is listening for connections.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#pydev_worker_debug_port = 5678
+
+# AES key for encrypting store 'location' metadata. This includes, if
+# used, Swift or S3 credentials. Should be set to a random string of
+# length 16, 24 or 32 bytes (string value)
+#metadata_encryption_key = <None>
+
+# Digest algorithm which will be used for digital signature. Use the
+# command "openssl list-message-digest-algorithms" to get the
+# available algorithms supported by the version of OpenSSL on the
+# platform. Examples are "sha1", "sha256", "sha512", etc. (string
+# value)
+#digest_algorithm = sha256
+
+# Address to bind the server.  Useful when selecting a particular
+# network interface. (string value)
+bind_host = 0.0.0.0
+
+# The port on which the server will listen. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+bind_port = 9191
+
+# The backlog value that will be used when creating the TCP listener
+# socket. (integer value)
+#backlog = 4096
+
+# The value for the socket option TCP_KEEPIDLE.  This is the time in
+# seconds that the connection must be idle before TCP starts sending
+# keepalive probes. (integer value)
+#tcp_keepidle = 600
+
+# CA certificate file to use to verify connecting clients. (string
+# value)
+#ca_file = <None>
+
+# Certificate file to use when starting API server securely. (string
+# value)
+#cert_file = <None>
+
+# Private key file to use when starting API server securely. (string
+# value)
+#key_file = <None>
+
+# The number of child process workers that will be created to service
+# requests. The default will be equal to the number of CPUs available.
+# (integer value)
+#workers = <None>
+
+# Maximum line size of message headers to be accepted. max_header_line
+# may need to be increased when using large tokens (typically those
+# generated by the Keystone v3 API with big service catalogs). (integer
+# value)
+#max_header_line = 16384
+
+# If False, server will return the header "Connection: close", If
+# True, server will return "Connection: Keep-Alive" in its responses.
+# In order to close the client socket connection explicitly after the
+# response is sent and read successfully by the client, you simply
+# have to set this option to False when you create a wsgi server.
+# (boolean value)
+#http_keepalive = true
+
+# Timeout for client connections' socket operations. If an incoming
+# connection is idle for this number of seconds it will be closed. A
+# value of '0' means wait forever. (integer value)
+#client_socket_timeout = 900
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of
+# the default INFO level. (boolean value)
+debug = true
+
+# If set to false, the logging level will be set to WARNING instead of
+# the default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+verbose = true
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# Note that when logging configuration files are used then all logging
+# configuration is set in the configuration file and other logging
+# configuration options are ignored (for example,
+# logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default
+# is set, logging will go to stderr as defined by use_stderr. This
+# option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+log_file = {RIFT_VAR_ROOT}/log/glance/glance-registry.log
+
+# (Optional) The base directory used for relative log_file  paths.
+# This option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is
+# moved or removed this handler will open a new log file with
+# specified path instantaneously. It makes sense only if log_file
+# option is specified and Linux platform is used. This option is
+# ignored if log_config_append is set. (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and
+# will be changed later to honor RFC5424. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined.
+# (string value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the
+# message is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is
+# ignored if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve to this
+# address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Allowed values: redis, dummy
+#rpc_zmq_matchmaker = redis
+
+# Type of concurrency used. Either "native" or "eventlet" (string
+# value)
+#rpc_zmq_concurrency = eventlet
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic.
+# Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address.
+# Must match "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Seconds to wait before a cast expires (TTL). The default value of -1
+# specifies an infinite linger period. The value of 0 specifies no
+# linger period. Pending messages shall be discarded immediately when
+# the socket is closed. Only supported by impl_zmq. (integer value)
+#rpc_cast_timeout = -1
+
+# The default number of seconds that poll should wait. Poll raises
+# timeout exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about
+# existing target ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 120
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy.
+# (boolean value)
+#use_pub_sub = true
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49152
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with
+# ZMQBindError. (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Size of executor thread pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend option
+# and driver specific configuration. (string value)
+#transport_url = <None>
+
+# The messaging driver to use, defaults to rabbit. Other drivers
+# include amqp and zmq. (string value)
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the transport_url
+# option. (string value)
+#control_exchange = openstack
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+#sqlite_db = oslo.sqlite
+sqlite_db = {RIFT_VAR_ROOT}/glance/glance-registry.db
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database.
+# (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+sql_connection=sqlite:///{RIFT_VAR_ROOT}/glance/glance-registry.db
+connection=sqlite:///{RIFT_VAR_ROOT}/glance/glance-registry.db
+
+# The SQLAlchemy connection string to use to connect to the slave
+# database. (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including
+# the default, overrides any server-set SQL mode. To use whatever SQL
+# mode is set by the server configuration, set this to no value.
+# Example: mysql_sql_mode= (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to
+# -1 to specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection
+# lost. (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database
+# operation up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries
+# of a database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before
+# error is raised. Set to -1 to specify an infinite retry count.
+# (integer value)
+#db_max_retries = 20
+
+db_auto_create = True
+
+#
+# From oslo.db.concurrency
+#
+
+# Enable the experimental use of thread pooling for all DB API calls
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
+#use_tpool = false
+
+
+[glance_store]
+
+#
+# From glance.store
+#
+
+# List of stores enabled. Valid stores are: cinder, file, http, rbd,
+# sheepdog, swift, s3, vsphere (list value)
+#stores = file,http
+
+# Default scheme to use to store image data. The scheme must be
+# registered by one of the stores defined by the 'stores' config
+# option. (string value)
+#default_store = file
+
+# Minimum interval seconds to execute updating dynamic storage
+# capabilities based on backend status then. It's not a periodic
+# routine, the update logic will be executed only when interval
+# seconds elapsed and an operation of store has triggered. The feature
+# will be enabled only when the option value is greater than zero.
+# (integer value)
+#store_capabilities_update_min_interval = 0
+
+# Specify the path to the CA bundle file to use in verifying the
+# remote server certificate. (string value)
+#https_ca_certificates_file = <None>
+
+# If true, the remote server certificate is not verified. If false,
+# then the default CA truststore is used for verification. This option
+# is ignored if "https_ca_certificates_file" is set. (boolean value)
+#https_insecure = true
+
+# Specify the http/https proxy information that should be used to
+# connect to the remote server. The proxy information should be a key
+# value pair of the scheme and proxy. e.g. http:10.0.0.1:3128. You can
+# specify proxies for multiple schemes by separating the key value
+# pairs with a comma, e.g. http:10.0.0.1:3128, https:10.0.0.1:1080.
+# (dict value)
+#http_proxy_information =
+
+# If True, swiftclient won't check for a valid SSL certificate when
+# authenticating. (boolean value)
+#swift_store_auth_insecure = false
+
+# A string giving the CA certificate file to use in SSL connections
+# for verifying certs. (string value)
+#swift_store_cacert = <None>
+
+# The region of the swift endpoint to be used for single tenant. This
+# setting is only necessary if the tenant has multiple swift
+# endpoints. (string value)
+#swift_store_region = <None>
+
+# If set, the configured endpoint will be used. If None, the storage
+# url from the auth response will be used. (string value)
+#swift_store_endpoint = <None>
+
+# A string giving the endpoint type of the swift service to use
+# (publicURL, adminURL or internalURL). This setting is only used if
+# swift_store_auth_version is 2. (string value)
+#swift_store_endpoint_type = publicURL
+
+# A string giving the service type of the swift service to use. This
+# setting is only used if swift_store_auth_version is 2. (string
+# value)
+#swift_store_service_type = object-store
+
+# Container within the account that the account should use for storing
+# images in Swift when using single container mode. In multiple
+# container mode, this will be the prefix for all containers. (string
+# value)
+#swift_store_container = glance
+
+# The size, in MB, that Glance will start chunking image files and do
+# a large object manifest in Swift. (integer value)
+#swift_store_large_object_size = 5120
+
+# The amount of data written to a temporary disk buffer during the
+# process of chunking the image file. (integer value)
+#swift_store_large_object_chunk_size = 200
+
+# A boolean value that determines if we create the container if it
+# does not exist. (boolean value)
+#swift_store_create_container_on_put = false
+
+# If set to True, enables multi-tenant storage mode which causes
+# Glance images to be stored in tenant specific Swift accounts.
+# (boolean value)
+#swift_store_multi_tenant = false
+
+# When set to 0, a single-tenant store will only use one container to
+# store all images. When set to an integer value between 1 and 32, a
+# single-tenant store will use multiple containers to store images,
+# and this value will determine how many containers are created. Used
+# only when swift_store_multi_tenant is disabled. The total number of
+# containers that will be used is equal to 16^N, so if this config
+# option is set to 2, then 16^2=256 containers will be used to store
+# images. (integer value)
+#swift_store_multiple_containers_seed = 0
+
+# A list of tenants that will be granted read/write access on all
+# Swift containers created by Glance in multi-tenant mode. (list
+# value)
+#swift_store_admin_tenants =
+
+# If set to False, disables SSL layer compression of https swift
+# requests. Setting to False may improve performance for images which
+# are already in a compressed format, eg qcow2. (boolean value)
+#swift_store_ssl_compression = true
+
+# The number of times a Swift download will be retried before the
+# request fails. (integer value)
+#swift_store_retry_get_count = 0
+
+# The period of time (in seconds) before token expiration when
+# glance_store will try to request a new user token. The default value
+# of 60 seconds means that if the token is going to expire in 1 minute
+# then glance_store will request a new user token. (integer value)
+#swift_store_expire_soon_interval = 60
+
+# If set to True create a trust for each add/get request to Multi-
+# tenant store in order to prevent authentication token to be expired
+# during uploading/downloading data. If set to False then user token
+# is used for Swift connection (so no overhead on trust creation).
+# Please note that this option is considered only and only if
+# swift_store_multi_tenant=True (boolean value)
+#swift_store_use_trusts = true
+
+# The reference to the default swift account/backing store parameters
+# to use for adding new images. (string value)
+#default_swift_reference = ref1
+
+# Version of the authentication service to use. Valid versions are 2
+# and 3 for keystone and 1 (deprecated) for swauth and rackspace.
+# (deprecated - use "auth_version" in swift_store_config_file) (string
+# value)
+#swift_store_auth_version = 2
+
+# The address where the Swift authentication service is listening.
+# (deprecated - use "auth_address" in swift_store_config_file) (string
+# value)
+#swift_store_auth_address = <None>
+
+# The user to authenticate against the Swift authentication service
+# (deprecated - use "user" in swift_store_config_file) (string value)
+#swift_store_user = <None>
+
+# Auth key for the user authenticating against the Swift
+# authentication service. (deprecated - use "key" in
+# swift_store_config_file) (string value)
+#swift_store_key = <None>
+
+# The config file that has the swift account(s) configs. (string value)
+#swift_store_config_file = <None>
+
+# RADOS images will be chunked into objects of this size (in
+# megabytes). For best performance, this should be a power of two.
+# (integer value)
+#rbd_store_chunk_size = 8
+
+# RADOS pool in which images are stored. (string value)
+#rbd_store_pool = images
+
+# RADOS user to authenticate as (only applicable if using Cephx. If
+# <None>, a default will be chosen based on the client. section in
+# rbd_store_ceph_conf) (string value)
+#rbd_store_user = <None>
+
+# Ceph configuration file path. If <None>, librados will locate the
+# default config. If using cephx authentication, this file should
+# include a reference to the right keyring in a client.<USER> section
+# (string value)
+#rbd_store_ceph_conf = /etc/ceph/ceph.conf
+
+# Timeout value (in seconds) used when connecting to ceph cluster. If
+# value <= 0, no timeout is set and default librados value is used.
+# (integer value)
+#rados_connect_timeout = 0
+
+# Info to match when looking for cinder in the service catalog. Format
+# is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volumev2::publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v2/%(tenant)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node. If specified, it will be used to locate
+# OpenStack services for stores. (string value)
+# Deprecated group/name - [DEFAULT]/os_region_name
+#cinder_os_region_name = <None>
+
+# Location of ca certificates file to use for cinder client requests.
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Time period of time in seconds to wait for a cinder volume
+# transition to complete. (integer value)
+#cinder_state_transition_timeout = 300
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = false
+
+# The address where the Cinder authentication service is listening. If
+# <None>, the cinder endpoint in the service catalog is used. (string
+# value)
+#cinder_store_auth_address = <None>
+
+# User name to authenticate against Cinder. If <None>, the user of
+# current context is used. (string value)
+#cinder_store_user_name = <None>
+
+# Password for the user authenticating against Cinder. If <None>, the
+# current context auth token is used. (string value)
+#cinder_store_password = <None>
+
+# Project name where the image is stored in Cinder. If <None>, the
+# project in current context is used. (string value)
+#cinder_store_project_name = <None>
+
+# Path to the rootwrap configuration file to use for running commands
+# as root. (string value)
+#rootwrap_config = /etc/glance/rootwrap.conf
+
+# The host where the S3 server is listening. (string value)
+#s3_store_host = <None>
+
+# The S3 query token access key. (string value)
+#s3_store_access_key = <None>
+
+# The S3 query token secret key. (string value)
+#s3_store_secret_key = <None>
+
+# The S3 bucket to be used to store the Glance data. (string value)
+#s3_store_bucket = <None>
+
+# The local directory where uploads will be staged before they are
+# transferred into S3. (string value)
+#s3_store_object_buffer_dir = <None>
+
+# A boolean to determine if the S3 bucket should be created on upload
+# if it does not exist or if an error should be returned to the user.
+# (boolean value)
+#s3_store_create_bucket_on_put = false
+
+# The S3 calling format used to determine the bucket. Either subdomain
+# or path can be used. (string value)
+#s3_store_bucket_url_format = subdomain
+
+# What size, in MB, should S3 start chunking image files and do a
+# multipart upload in S3. (integer value)
+#s3_store_large_object_size = 100
+
+# What multipart upload part size, in MB, should S3 use when uploading
+# parts. The size must be greater than or equal to 5M. (integer value)
+#s3_store_large_object_chunk_size = 10
+
+# The number of thread pools to perform a multipart upload in S3.
+# (integer value)
+#s3_store_thread_pools = 10
+
+# Enable the use of a proxy. (boolean value)
+#s3_store_enable_proxy = false
+
+# Address or hostname for the proxy server. (string value)
+#s3_store_proxy_host = <None>
+
+# The port to use when connecting over a proxy. (integer value)
+#s3_store_proxy_port = 8080
+
+# The username to connect to the proxy. (string value)
+#s3_store_proxy_user = <None>
+
+# The password to use when connecting over a proxy. (string value)
+#s3_store_proxy_password = <None>
+
+# Images will be chunked into objects of this size (in megabytes). For
+# best performance, this should be a power of two. (integer value)
+#sheepdog_store_chunk_size = 64
+
+# Port of sheep daemon. (integer value)
+#sheepdog_store_port = 7000
+
+# IP address of sheep daemon. (string value)
+#sheepdog_store_address = localhost
+
+# Directory to which the Filesystem backend store writes images.
+# (string value)
+#filesystem_store_datadir = /var/lib/glance/images
+
+# List of directories and its priorities to which the Filesystem
+# backend store writes images. (multi valued)
+#filesystem_store_datadirs =
+
+# The path to a file which contains the metadata to be returned with
+# any location associated with this store.  The file must contain a
+# valid JSON object. The object should contain the keys 'id' and
+# 'mountpoint'. The value for both keys should be 'string'. (string
+# value)
+#filesystem_store_metadata_file = <None>
+
+# The required permission for the created image file. In this way the
+# user of another service, e.g. Nova, who consumes the image could be
+# the exclusive member of the group that owns the files created.
+# Assigning it less than or equal to zero means don't change the default
+# permission of the file. This value will be decoded as an octal
+# digit. (integer value)
+#filesystem_store_file_perm = 0
+
+# ESX/ESXi or vCenter Server target system. The server value can be an
+# IP address or a DNS name. (string value)
+#vmware_server_host = <None>
+
+# Username for authenticating with VMware ESX/VC server. (string
+# value)
+#vmware_server_username = <None>
+
+# Password for authenticating with VMware ESX/VC server. (string
+# value)
+#vmware_server_password = <None>
+
+# Number of times VMware ESX/VC server API must be retried upon
+# connection related issues. (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks invoked on VMware ESX/VC
+# server. (integer value)
+#vmware_task_poll_interval = 5
+
+# The name of the directory where the glance images will be stored in
+# the VMware datastore. (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# If true, the ESX/vCenter server certificate is not verified. If
+# false, then the default CA truststore is used for verification. This
+# option is ignored if "vmware_ca_file" is set. (boolean value)
+# Deprecated group/name - [DEFAULT]/vmware_api_insecure
+#vmware_insecure = false
+
+# Specify a CA bundle file to use in verifying the ESX/vCenter server
+# certificate. (string value)
+#vmware_ca_file = <None>
+
+# A list of datastores where the image can be stored. This option may
+# be specified multiple times for specifying multiple datastores. The
+# datastore name should be specified after its datacenter path,
+# separated by ":". An optional weight may be given after the
+# datastore name, separated again by ":". Thus, the required format
+# becomes <datacenter_path>:<datastore_name>:<optional_weight>. When
+# adding an image, the datastore with highest weight will be selected,
+# unless there is not enough free space available in cases where the
+# image size is already known. If no weight is given, it is assumed to
+# be zero and the directory will be considered for selection last. If
+# multiple datastores have the same weight, then the one with the most
+# free space available is selected. (multi valued)
+#vmware_datastores =
+
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete public Identity API endpoint. (string value)
+#auth_uri = <None>
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version = <None>
+
+# Do not handle authorization requests within the middleware, but
+# delegate the authorization decision to downstream WSGI components.
+# (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server.
+# (integer value)
+#http_connect_timeout = <None>
+
+# How many times are we trying to reconnect when communicating with
+# Identity API Server. (integer value)
+#http_request_max_retries = 3
+
+# Env key for the swift cache. (string value)
+#cache = <None>
+
+# Required if identity server requires client certificate (string
+# value)
+#certfile = <None>
+
+# Required if identity server requires client certificate (string
+# value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. Defaults to system CAs. (string value)
+#cafile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# The region in which the identity server can be found. (string value)
+#region_name = <None>
+
+# Directory used to cache files related to PKI tokens. (string value)
+#signing_dir = <None>
+
+# Optionally specify a list of memcached server(s) to use for caching.
+# If left undefined, tokens will instead be cached in-process. (list
+# value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers = <None>
+
+# In order to prevent excessive effort spent validating tokens, the
+# middleware caches previously-seen tokens for a configurable duration
+# (in seconds). Set to -1 to disable caching completely. (integer
+# value)
+#token_cache_time = 300
+
+# Determines the frequency at which the list of revoked tokens is
+# retrieved from the Identity service (in seconds). A high number of
+# revocation events combined with a low cache duration may
+# significantly reduce performance. (integer value)
+#revocation_cache_time = 10
+
+# (Optional) If defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token data is
+# encrypted and authenticated in the cache. If the value is not one of
+# these options or empty, auth_token will raise an exception on
+# initialization. (string value)
+# Allowed values: None, MAC, ENCRYPT
+#memcache_security_strategy = None
+
+# (Optional, mandatory if memcache_security_strategy is defined) This
+# string is used for key derivation. (string value)
+#memcache_secret_key = <None>
+
+# (Optional) Number of seconds memcached server is considered dead
+# before it is tried again. (integer value)
+#memcache_pool_dead_retry = 300
+
+# (Optional) Maximum total number of open connections to every
+# memcached server. (integer value)
+#memcache_pool_maxsize = 10
+
+# (Optional) Socket timeout in seconds for communicating with a
+# memcached server. (integer value)
+#memcache_pool_socket_timeout = 3
+
+# (Optional) Number of seconds a connection to memcached is held
+# unused in the pool before it is closed. (integer value)
+#memcache_pool_unused_timeout = 60
+
+# (Optional) Number of seconds that an operation will wait to get a
+# memcached client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout = 10
+
+# (Optional) Use the advanced (eventlet safe) memcached client pool.
+# The advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool = false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If
+# False, middleware will not ask for service catalog on token
+# validation and will not set the X-Service-Catalog header. (boolean
+# value)
+#include_service_catalog = true
+
+# Used to control the use and type of token binding. Can be set to:
+# "disabled" to not check token binding. "permissive" (default) to
+# validate binding information if the bind type is of a form known to
+# the server and ignore it if not. "strict" like "permissive" but if
+# the bind type is unknown the token will be rejected. "required" any
+# form of token binding is needed to be allowed. Finally the name of a
+# binding method that must be present in tokens. (string value)
+#enforce_token_bind = permissive
+
+# If true, the revocation list will be checked for cached tokens. This
+# requires that PKI tokens are configured on the identity server.
+# (boolean value)
+#check_revocations_for_cached = false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a single
+# algorithm or multiple. The algorithms are those supported by Python
+# standard hashlib.new(). The hashes will be tried in the order given,
+# so put the preferred one first for performance. The result of the
+# first hash will be stored in the cache. This will typically be set
+# to multiple values only while migrating from a less secure algorithm
+# to a more secure one. Once all the old tokens are expired this
+# option should be set to a single value for better performance. (list
+# value)
+#hash_algorithms = md5
+
+# Authentication type to load (unknown value)
+# Deprecated group/name - [DEFAULT]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (unknown
+# value)
+#auth_section = <None>
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host = 127.0.0.1
+
+# Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = 6379
+
+# Password for Redis server (optional). (string value)
+#password =
+
+# List of Redis Sentinel hosts (fault tolerance mode) e.g.
+# [host:port, host1:port ... ] (list value)
+#sentinel_hosts =
+
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
+
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 500
+
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations (integer value)
+#socket_timeout = 1000
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# address prefix used when sending to a specific server (string value)
+# Deprecated group/name - [amqp1]/server_request_prefix
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+# Deprecated group/name - [amqp1]/broadcast_prefix
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+# Deprecated group/name - [amqp1]/group_request_prefix
+#group_request_prefix = unicast
+
+# Name for the AMQP container (string value)
+# Deprecated group/name - [amqp1]/container_name
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+# Deprecated group/name - [amqp1]/idle_timeout
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+# Deprecated group/name - [amqp1]/trace
+#trace = false
+
+# CA certificate PEM file to verify server certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_ca_file
+#ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients (string
+# value)
+# Deprecated group/name - [amqp1]/ssl_cert_file
+#ssl_cert_file =
+
+# Private key PEM file used to sign cert_file certificate (string
+# value)
+# Deprecated group/name - [amqp1]/ssl_key_file
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+# Deprecated group/name - [amqp1]/ssl_key_password
+#ssl_key_password = <None>
+
+# Accept clients using either SSL or plain TCP (boolean value)
+# Deprecated group/name - [amqp1]/allow_insecure_clients
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+# Deprecated group/name - [amqp1]/sasl_mechanisms
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string
+# value)
+# Deprecated group/name - [amqp1]/sasl_config_dir
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+# Deprecated group/name - [amqp1]/sasl_config_name
+#sasl_config_name =
+
+# User name for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/username
+#username =
+
+# Password for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/password
+#password =
+
+
+[oslo_messaging_notifications]
+
+#
+# From oslo.messaging
+#
+
+# The Driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If
+# not set, we fall back to the same configuration used for RPC.
+# (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete = false
+
+# SSL version to use (valid only if SSL enabled). Valid values are
+# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be
+# available on some distributions. (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_version
+#kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
+#kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
+#kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled).
+# (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
+#kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer
+# cancel notification. (floating point value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression
+# will not be used. This option may not be available in future
+# versions. (string value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before abandoning sending it its
+# replies. This value should not be longer than rpc_response_timeout.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we
+# are currently connected to becomes unavailable. Takes effect only if
+# more than one RabbitMQ node is provided in config. (string value)
+# Allowed values: round-robin, shuffle
+#kombu_failover_strategy = round-robin
+
+# The RabbitMQ broker address where a single node is used. (string
+# value)
+# Deprecated group/name - [DEFAULT]/rabbit_host
+#rabbit_host = localhost
+
+# The RabbitMQ broker port where a single node is used. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/rabbit_port
+#rabbit_port = 5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/rabbit_hosts
+#rabbit_hosts = $rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
+#rabbit_use_ssl = false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_userid
+#rabbit_userid = guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_password
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_login_method
+#rabbit_login_method = AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
+#rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to backoff for between retries when connecting to RabbitMQ.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30
+# seconds. (integer value)
+#rabbit_interval_max = 30
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. In RabbitMQ 3.0,
+# queue mirroring is no longer controlled by the x-ha-policy argument
+# when declaring a queue. If you just want to make sure that all
+# queues (except those with auto-generated names) are mirrored across
+# all nodes, run: "rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha-
+# mode": "all"}' " (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
+
+# Positive integer representing duration in seconds for queue TTL
+# (x-expires). Queues which are unused for the duration of the TTL are
+# automatically deleted. The parameter affects only reply and fanout
+# queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 600
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down
+# if heartbeat's keep-alive fails (0 disable the heartbeat).
+# EXPERIMENTAL (integer value)
+#heartbeat_timeout_threshold = 60
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+#fake_rabbit = false
+
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer
+# value)
+#heartbeat_interval = 1
+
+# Enable SSL (boolean value)
+#ssl = <None>
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating
+# point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating
+# point value)
+#tcp_user_timeout = 0.25
+
+# Set delay for reconnection to some host which has connection error
+# (floating point value)
+#host_connection_reconnect_delay = 0.25
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 10
+
+# Maximum number of connections to create above `pool_max_size`.
+# (integer value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connection to become available
+# (integer value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer
+# value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are
+# considered stale in seconds or None for no staleness. Stale
+# connections are closed on acquire. (integer value)
+#pool_stale = 60
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during
+# sending notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer
+# value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# rpc listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# rpc reply listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during
+# sending reply. -1 means infinite retry during rpc_timeout (integer
+# value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of connectivity problem during
+# sending RPC message, -1 means infinite retry. If actual retry
+# attempts in not 0 the rpc request could be processed more then one
+# time (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending RPC message (floating point value)
+#rpc_retry_delay = 0.25
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string
+# value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be
+# relative to any directory in the search path defined by the
+# config_dir option, or absolute paths. The file defined by
+# policy_file must exist for these directories to be searched.
+# Missing or empty directories are ignored. (multi valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
+
+
+[paste_deploy]
+
+#
+# From glance.registry
+#
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-api-keystone] use the value "keystone" (string
+# value)
+#flavor = <None>
+
+# Name of the paste configuration file. (string value)
+config_file = {RIFT_INSTALL}/etc/glance/glance-registry-paste.ini
+
+
+[profiler]
+
+#
+# From glance.registry
+#
+
+# If False fully disable profiling feature. (boolean value)
+#enabled = false
+
+# If False doesn't trace SQL requests. (boolean value)
+#trace_sqlalchemy = false
+
+# Secret key to use to sign Glance API and Glance Registry services
+# tracing messages. (string value)
+#hmac_keys = SECRET_KEY
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/policy.json b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/policy.json
new file mode 100644
index 0000000..f49bc08
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/policy.json
@@ -0,0 +1,61 @@
+{
+    "context_is_admin":  "role:admin",
+    "default": "",
+
+    "add_image": "",
+    "delete_image": "",
+    "get_image": "",
+    "get_images": "",
+    "modify_image": "",
+    "publicize_image": "role:admin",
+    "copy_from": "",
+
+    "download_image": "",
+    "upload_image": "",
+
+    "delete_image_location": "",
+    "get_image_location": "",
+    "set_image_location": "",
+
+    "add_member": "",
+    "delete_member": "",
+    "get_member": "",
+    "get_members": "",
+    "modify_member": "",
+
+    "manage_image_cache": "role:admin",
+
+    "get_task": "role:admin",
+    "get_tasks": "role:admin",
+    "add_task": "role:admin",
+    "modify_task": "role:admin",
+
+    "deactivate": "",
+    "reactivate": "",
+
+    "get_metadef_namespace": "",
+    "get_metadef_namespaces":"",
+    "modify_metadef_namespace":"",
+    "add_metadef_namespace":"",
+
+    "get_metadef_object":"",
+    "get_metadef_objects":"",
+    "modify_metadef_object":"",
+    "add_metadef_object":"",
+
+    "list_metadef_resource_types":"",
+    "get_metadef_resource_type":"",
+    "add_metadef_resource_type_association":"",
+
+    "get_metadef_property":"",
+    "get_metadef_properties":"",
+    "modify_metadef_property":"",
+    "add_metadef_property":"",
+
+    "get_metadef_tag":"",
+    "get_metadef_tags":"",
+    "modify_metadef_tag":"",
+    "add_metadef_tag":"",
+    "add_metadef_tags":""
+
+}
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/schema-image.json b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/schema-image.json
new file mode 100644
index 0000000..69c2f85
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/schema-image.json
@@ -0,0 +1,28 @@
+{
+    "kernel_id": {
+        "type": ["null", "string"],
+        "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+        "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
+    },
+    "ramdisk_id": {
+        "type": ["null", "string"],
+        "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+        "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
+    },
+    "instance_uuid": {
+        "type": "string",
+        "description": "Metadata which can be used to record which instance this image is associated with. (Informational only, does not create an instance snapshot.)"
+    },
+    "architecture": {
+        "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+        "type": "string"
+    },
+    "os_distro": {
+        "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+        "type": "string"
+    },
+    "os_version": {
+        "description": "Operating system version as specified by the distributor",
+        "type": "string"
+    }
+}
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/__init__.py b/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/__init__.py
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/client.py b/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/client.py
new file mode 100644
index 0000000..10df45b
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/client.py
@@ -0,0 +1,174 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import concurrent.futures
+
+import gi
+gi.require_version("RwImageMgmtYang", "1.0")
+from gi.repository import (
+    RwImageMgmtYang,
+)
+
+
+class UploadJobError(Exception):
+    pass
+
+
+class UploadJobFailed(UploadJobError):
+    pass
+
+
+class UploadJobCancelled(UploadJobFailed):
+    pass
+
+
+class UploadJobClient(object):
+    """ An upload job DTS client
+
+    This class wraps the DTS upload job actions to be more easily reused across
+    various components
+    """
+    def __init__(self, log, loop, dts):
+        self._log = log
+        self._loop = loop
+        self._dts = dts
+
+    def create_job(self, image_name, image_checksum, cloud_account_names=None):
+        """ Create an image upload_job and return an UploadJob instance
+
+        Arguments:
+            image_name - The name of the image in the image catalog
+            image_checksum - The checksum of the image in the catalog
+            cloud_account_names - Names of the cloud accounts to upload the image to.
+                                  None uploads the image to all cloud accounts.
+
+        Returns:
+            An UploadJob instance
+        """
+        create_job_msg = RwImageMgmtYang.CreateUploadJob.from_dict({
+            "onboarded_image": {
+                "image_name": image_name,
+                "image_checksum": image_checksum,
+                }
+            })
+
+        if cloud_account_names is not None:
+            create_job_msg.cloud_account = cloud_account_names
+
+        query_iter = yield from self._dts.query_rpc(
+                "I,/rw-image-mgmt:create-upload-job",
+                0,
+                create_job_msg,
+                )
+
+        for fut_resp in query_iter:
+            rpc_result = (yield from fut_resp).result
+
+            job_id = rpc_result.job_id
+
+        return UploadJob(self._log, self._loop, self._dts, job_id)
+
+    def create_job_threadsafe(self, image_name, image_checksum, cloud_account_names=None):
+        """ A thread-safe, synchronous wrapper for create_job """
+        future = concurrent.futures.Future()
+
+        def on_done(asyncio_future):
+            if asyncio_future.exception() is not None:
+                future.set_exception(asyncio_future.exception())
+
+            elif asyncio_future.result() is not None:
+                future.set_result(asyncio_future.result())
+
+        def add_task():
+            task = self._loop.create_task(
+                    self.create_job(image_name, image_checksum, cloud_account_names)
+                    )
+            task.add_done_callback(on_done)
+
+        self._loop.call_soon_threadsafe(add_task)
+        return future.result()
+
+
+class UploadJob(object):
+    """ A handle for an image upload job """
+    def __init__(self, log, loop, dts, job_id):
+        self._log = log
+        self._loop = loop
+        self._dts = dts
+        self._job_id = job_id
+
+    @asyncio.coroutine
+    def wait_until_complete(self):
+        """ Wait until the upload job reaches a terminal state
+
+        Raises:
+            UploadJobError: A generic exception occurred in the upload job
+            UploadJobFailed: The upload job failed
+            UploadJobCancelled: The upload job was cancelled
+        """
+        self._log.debug("waiting for upload job %s to complete", self._job_id)
+        while True:
+            query_iter = yield from self._dts.query_read(
+                "D,/rw-image-mgmt:upload-jobs/rw-image-mgmt:job[rw-image-mgmt:id='{}']".format(
+                    self._job_id
+                )
+            )
+            job_status_msg = None
+            for fut_resp in query_iter:
+                job_status_msg = (yield from fut_resp).result
+                break
+
+            if job_status_msg is None:
+                raise UploadJobError("did not get a status response for job_id: %s",
+                                     self._job_id)
+
+            if job_status_msg.status == "COMPLETED":
+                msg = "upload job %s completed successfully" % self._job_id
+                self._log.debug(msg)
+                return
+
+            elif job_status_msg.status == "FAILED":
+                msg = "upload job %s was not successful: %s" % (self._job_id, job_status_msg.status)
+                self._log.error(msg)
+                raise UploadJobFailed(msg)
+
+            elif job_status_msg.status == "CANCELLED":
+                msg = "upload job %s was cancelled" % self._job_id
+                self._log.error(msg)
+                raise UploadJobCancelled(msg)
+
+            yield from asyncio.sleep(.5, loop=self._loop)
+
+    def wait_until_complete_threadsafe(self):
+        """ A thread-safe, synchronous wrapper for wait_until_complete """
+
+        future = concurrent.futures.Future()
+
+        def on_done(asyncio_future):
+            if asyncio_future.exception() is not None:
+                future.set_exception(asyncio_future.exception())
+                return
+
+            future.set_result(asyncio_future.result())
+
+        def add_task():
+            task = self._loop.create_task(self.wait_until_complete())
+            task.add_done_callback(on_done)
+
+        self._loop.call_soon_threadsafe(add_task)
+        return future.result()
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/__init__.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/__init__.py
new file mode 100644
index 0000000..c5c582e
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/__init__.py
@@ -0,0 +1 @@
+from .tasklet import ImageManagerTasklet
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/glance_client.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/glance_client.py
new file mode 100644
index 0000000..614c152
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/glance_client.py
@@ -0,0 +1,357 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import itertools
+import logging
+import os
+import glanceclient
+import keystoneclient.v3.client as keystone_client
+from keystoneauth1 import (
+    identity as keystone_identity,
+    session as keystone_session
+    )
+
+from gi.repository import RwcalYang
+
+logger = logging.getLogger(name=__name__)
+
+
class OpenstackImageError(Exception):
    """Base error for glance image operations (lookup/conversion failures)."""
    pass
+
+
class OpenstackNonUniqueImageError(OpenstackImageError):
    """Raised when more than one image matches the search criteria."""
    pass
+
+
class OpenstackImageCreateError(Exception):
    """Raised when creating/uploading an image to glance fails."""
    pass
+
+
class OpenstackImageDeleteError(Exception):
    """Raised when deleting an image from glance fails."""
    pass
+
+
class InvalidImageError(Exception):
    """Raised when an image name/extension cannot be mapped to a disk format."""
    pass
+
+
class OpenstackAccount(object):
    """Value object holding the credentials needed to reach keystone."""

    def __init__(self, auth_url, tenant, username, password):
        # Keep attribute names identical to the constructor arguments so the
        # object acts as a plain credential record.
        (self.auth_url, self.tenant,
         self.username, self.password) = auth_url, tenant, username, password
+
+
class OpenstackImage(object):
    """ This value class encapsulates the RIFT-relevant glance image fields """

    # Attributes copied off a glance image response.
    FIELDS = ["id", "name", "checksum", "disk_format",
              "container_format", "size", "properties", "status"]
    # Attributes a glance response is allowed to omit.
    OPTIONAL_FIELDS = ["id", "checksum", "location"]

    def __init__(self, name, disk_format, container_format, size,
                 properties=None, id=None, checksum=None, status="saving",
                 location=None):
        """
        Arguments:
            name - the image name
            disk_format - the image disk format (e.g. "raw", "qcow2")
            container_format - the image container format (e.g. "bare")
            size - the image size in bytes
            properties - optional dict of extra image properties
            id - the glance image id, if already assigned
            checksum - the image md5 checksum, if known
            status - the glance image status (defaults to "saving")
            location - optional image location URL
        """
        self.name = name
        self.disk_format = disk_format
        self.container_format = container_format
        self.size = size
        self.properties = properties if properties is not None else {}
        self.status = status

        self.id = id
        self.checksum = checksum
        # Fix: location was accepted but silently dropped; store it so
        # callers can read it back.
        self.location = location

    @classmethod
    def from_image_response(cls, image):
        """ Convert an image response from glance into an OpenstackImage

        Arguments:
            image - A glance image object (from glance_client.images.list() for example)

        Returns:
            An instance of OpenstackImage

        Raises:
            OpenstackImageError - Could not convert the response into a OpenstackImage object
        """
        missing_fields = [field for field in cls.FIELDS
                          if field not in cls.OPTIONAL_FIELDS and not hasattr(image, field)]
        if missing_fields:
            raise OpenstackImageError(
                    "Openstack image is missing required fields: %s" % missing_fields
                    )

        # Fix: optional fields (id, checksum) may legitimately be absent from
        # the response; default them to None instead of letting getattr raise
        # AttributeError.
        kwargs = {field: getattr(image, field, None) for field in cls.FIELDS}

        return cls(**kwargs)
+
+
class OpenstackKeystoneClient(object):
    """ This class wraps the Keystone Client """
    def __init__(self, ks_client):
        # ks_client: an authenticated keystoneclient v3 Client instance
        self._ks_client = ks_client

    @property
    def auth_token(self):
        # Token issued by keystone; used to authenticate against other
        # services (e.g. glance) on the user's behalf.
        return self._ks_client.auth_token

    @classmethod
    def from_openstack_account(cls, os_account):
        # Build an authenticated keystone client from an OpenstackAccount
        # value object.
        # NOTE(review): insecure=True disables TLS certificate verification —
        # confirm this is intended outside lab environments.
        ks_client = keystone_client.Client(
                insecure=True,
                auth_url=os_account.auth_url,
                username=os_account.username,
                password=os_account.password,
                tenant_name=os_account.tenant
                )

        return cls(ks_client)

    @property
    def glance_endpoint(self):
        """ Return the glance endpoint from the keystone service """
        # Looks up the public image-service URL in the service catalog that
        # keystone populated at authentication time.
        glance_ep = self._ks_client.service_catalog.url_for(
                service_type='image',
                endpoint_type='publicURL'
                )

        return glance_ep
+
+
class OpenstackGlanceClient(object):
    """ Thin wrapper around the glance ('1' API) client with RIFT logging """

    def __init__(self, log, glance_client):
        """
        Arguments:
            log - logger instance
            glance_client - a glanceclient.Client instance
        """
        self._log = log
        self._client = glance_client

    @classmethod
    def from_ks_client(cls, log, ks_client):
        """ Create a OpenstackGlanceClient from a keystone client instance

        Arguments:
            log - logger instance
            ks_client - A keystone client instance

        Returns:
            A OpenstackGlanceClient instance
        """

        glance_ep = ks_client.glance_endpoint
        glance_client = glanceclient.Client(
                '1',
                glance_ep,
                token=ks_client.auth_token,
                )

        return cls(log, glance_client)

    @classmethod
    def from_token(cls, log, host, port, token):
        """ Create a OpenstackGlanceClient instance using a keystone auth token

        Arguments:
            log - logger instance
            host - the glance host
            port - the glance port
            token - the keystone token

        Returns:
            A OpenstackGlanceClient instance
        """
        endpoint = "http://{}:{}".format(host, port)
        glance_client = glanceclient.Client("1", endpoint, token=token)
        return cls(log, glance_client)

    def get_image_list(self):
        """ Return the list of images from the Glance server

        Returns:
            A list of OpenstackImage instances
        """
        # Private and public images are listed separately; chain both
        # listings into a single result.
        return [OpenstackImage.from_image_response(image)
                for image in itertools.chain(
                    self._client.images.list(is_public=False),
                    self._client.images.list(is_public=True)
                    )]

    def get_image_data(self, image_id):
        """ Return an image bytes generator from an image id

        Arguments:
            image_id - An image id that exists on the glance server

        Returns:
            A generator which produces the image data bytestrings

        Raises:
            OpenstackImageError - Could not find the image id
        """

        # Probe for existence first so a missing id surfaces as a
        # well-typed OpenstackImageError instead of a client error later.
        try:
            self._client.images.get(image_id)
        except Exception as e:
            msg = "Failed to find image: %s" % image_id
            self._log.exception(msg)
            raise OpenstackImageError(msg) from e

        img_data = self._client.images.data(image_id)
        return img_data

    def find_active_image(self, id=None, name=None, checksum=None):
        """ Find an active image on the glance server

        Arguments:
            id - the image id to match
            name - the image name to match
            checksum - the image checksum to match

        Returns:
            A OpenstackImage instance

        Raises:
            ValueError - neither an image id nor an image name was provided
            OpenstackImageError - could not find a matching active image
            OpenstackNonUniqueImageError - more than one image matched;
                                           use the image id to disambiguate
        """
        if id is None and name is None:
            raise ValueError("image id or image name must be provided")

        self._log.debug("attempting to find active image with id %s name %s and checksum %s",
                        id, name, checksum)

        found_image = None

        # Fix: fetch the catalog once and iterate that list; the original
        # iterated get_image_list() a second time, doubling the API calls.
        image_list = self.get_image_list()
        self._log.debug("got image list from openstack: %s", image_list)
        for image in image_list:
            self._log.debug(image)
            if image.status != "active":
                continue

            if id is not None and image.id != id:
                continue

            if name is not None and image.name != name:
                continue

            if checksum is not None and image.checksum != checksum:
                continue

            if found_image is not None:
                raise OpenstackNonUniqueImageError(
                    "Found multiple images that matched the criteria.  Use image id to disambiguate."
                    )

            found_image = image

        if found_image is None:
            raise OpenstackImageError(
                    "could not find an active image with id %s name %s and checksum %s" %
                    (id, name, checksum))

        # Fix: get_image_list() already returns OpenstackImage instances;
        # re-running from_image_response() on one was redundant.
        return found_image

    def create_image_from_hdl(self, image, file_hdl):
        """ Create an image on the glance server from a file handle

        Arguments:
            image - An OpenstackImage instance
            file_hdl - An open image file handle

        Raises:
            OpenstackImageCreateError - Could not upload the image
        """
        try:
            self._client.images.create(
                    name=image.name,
                    is_public="False",
                    disk_format=image.disk_format,
                    container_format=image.container_format,
                    data=file_hdl
                    )
        except Exception as e:
            msg = "Failed to upload image to Openstack"
            self._log.exception(msg)
            raise OpenstackImageCreateError(msg) from e

    def create_image_from_url(self, image_url, image_name, image_checksum=None,
                              disk_format=None, container_format=None):
        """ Create an image on the glance server from an image url

        Arguments:
            image_url - An HTTP image url
            image_name - An openstack image name (filename with proper extension)
            image_checksum - The image md5 checksum
            disk_format - Image disk format; derived from the file extension
                          when not provided
            container_format - Image container format; defaults to "bare"

        Returns:
            An OpenstackImage instance for the created image

        Raises:
            InvalidImageError - The disk format could not be derived from the name
            OpenstackImageCreateError - Could not create the image
        """
        def disk_format_from_image_name(image_name):
            # Map the file extension onto a known RwcalYang disk format.
            _, image_ext = os.path.splitext(image_name)
            if not image_ext:
                raise InvalidImageError("image name must have an extension")

            # Strip off the .
            image_ext = image_ext[1:]

            if not hasattr(RwcalYang.DiskFormat, image_ext.upper()):
                # Fix: exception constructors do not apply logger-style
                # formatting; %-format the message explicitly.
                raise InvalidImageError(
                        "unknown image extension for disk format: %s" % image_ext)

            disk_format = image_ext.lower()
            return disk_format

        # If the disk format was not provided, attempt to match via the file
        # extension.
        if disk_format is None:
            disk_format = disk_format_from_image_name(image_name)

        if container_format is None:
            container_format = "bare"

        create_args = dict(
            location=image_url,
            name=image_name,
            is_public="True",
            disk_format=disk_format,
            container_format=container_format,
            )

        if image_checksum is not None:
            create_args["checksum"] = image_checksum

        try:
            self._log.debug("creating an image from url: %s", create_args)
            image = self._client.images.create(**create_args)
        except Exception as e:
            msg = "Failed to create image from url in openstack"
            self._log.exception(msg)
            raise OpenstackImageCreateError(msg) from e

        return OpenstackImage.from_image_response(image)

    def delete_image_from_id(self, image_id):
        """ Delete an image from the glance catalog

        Arguments:
            image_id - the id of the image to delete

        Raises:
            OpenstackImageDeleteError - Could not delete the image
        """
        self._log.info("Deleting image from catalog: %s", image_id)
        try:
            # Fix: drop the unused return binding and chain the original
            # exception for debuggability.
            self._client.images.delete(image_id)
        except Exception as e:
            msg = "Failed to delete image %s in openstack" % image_id
            self._log.exception(msg)
            raise OpenstackImageDeleteError(msg) from e
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/glance_proxy_server.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/glance_proxy_server.py
new file mode 100644
index 0000000..9b3972e
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/glance_proxy_server.py
@@ -0,0 +1,276 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import json
+
+from .lib import quickproxy
+import rift.tasklets.tornado
+
+
class GlanceConfig(object):
    """Connection settings (host, port, auth token) for the glance server."""

    DEFAULT_HOST = "127.0.0.1"
    DEFAULT_PORT = 9292
    DEFAULT_TOKEN = "test"

    def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, token=DEFAULT_TOKEN):
        """Store the glance endpoint settings as plain attributes."""
        for attr, value in (("host", host), ("port", port), ("token", token)):
            setattr(self, attr, value)
+
+
class GlanceImageCreateRequest(object):
    """Value object describing a glance image-create request."""

    def __init__(self, name, size, checksum, disk_format, container_format):
        self.name = name
        self.size = size
        self.checksum = checksum
        self.disk_format = disk_format
        self.container_format = container_format

    def __repr__(self):
        summary = dict(
                name=self.name,
                size=self.size,
                checksum=self.checksum,
                )
        return "{}({})".format(type(self).__name__, summary)

    @classmethod
    def from_header_dict(cls, header_dict):
        """Build a request from the 'x-image-meta-*' HTTP headers glance
        clients send on an image-create POST, e.g.:

        curl -i -X POST -H 'x-image-meta-container_format: bare' -H
        'Transfer-Encoding: chunked' -H 'User-Agent: python-glanceclient' -H
        'x-image-meta-size: 13167616' -H 'x-image-meta-is_public: False' -H
        'X-Auth-Token: test' -H 'Content-Type: application/octet-stream' -H
        'x-image-meta-checksum: 64d7c1cd2b6f60c92c14662941cb7913' -H
        'x-image-meta-disk_format: raw' -H 'x-image-meta-name:
        cirros-0.3.2-x86_64-disk.img'

        Size and checksum headers are optional and default to None.
        """
        raw_size = header_dict.get("x-image-meta-size")

        return cls(
                name=header_dict["x-image-meta-name"],
                size=int(raw_size) if raw_size is not None else None,
                checksum=header_dict.get("x-image-meta-checksum"),
                disk_format=header_dict["x-image-meta-disk_format"],
                container_format=header_dict["x-image-meta-container_format"],
                )
+
+
class GlanceImageCreateResponse(object):
    """Value object for the JSON body glance returns after an image create."""

    def __init__(self, id, name, status, size, checksum):
        self.id = id
        self.name = name
        self.status = status
        self.size = size
        self.checksum = checksum

    def __repr__(self):
        summary = dict(
                id=self.id,
                name=self.name,
                status=self.status,
                checksum=self.checksum,
                )
        return "{}({})".format(type(self).__name__, summary)

    @classmethod
    def from_response_body(cls, response_body):
        """Parse the JSON response body (bytes) of a glance image create, e.g.:

        {"image": {"status": "active", "deleted": false, "container_format":
        "bare", "min_ram": 0, "updated_at": "2016-06-24T14:41:38.598199",
        "owner": null, "min_disk": 0, "is_public": false, "deleted_at": null,
        "id": "5903cb2d-53db-4343-b055-586475a077f5", "size": 13167616, "name":
        "cirros-0.3.2-x86_64-disk.img", "checksum":
        "64d7c1cd2b6f60c92c14662941cb7913", "created_at":
        "2016-06-24T14:41:38.207356", "disk_format": "raw",
        "properties": {}, "protected": false}}
        """
        image = json.loads(response_body.decode())["image"]

        return cls(
                id=image["id"],
                name=image["name"],
                status=image["status"],
                size=image["size"],
                checksum=image["checksum"],
                )
+
+
class GlanceHTTPMockProxy(object):
    """ A no-op stand-in for QuickProxyServer.

    Stores the request/response hooks but never starts a real proxy;
    start() and stop() intentionally do nothing.  Presumably used for
    testing without a live glance server — confirm against callers.
    """
    def __init__(self, log, loop, on_http_request, on_http_response):
        self._log = log
        self._loop = loop
        # Hooks are kept for interface parity but never invoked here.
        self._on_http_request = on_http_request
        self._on_http_response = on_http_response

    def start(self):
        # Intentionally a no-op.
        pass

    def stop(self):
        # Intentionally a no-op.
        pass
+
+
class QuickProxyServer(object):
    """ This class implements a HTTP Proxy server

    Wraps quickproxy's tornado-based proxy so it runs bridged onto the
    tasklet's asyncio event loop.
    """
    DEFAULT_PROXY_PORT = 9999
    DEBUG_LEVEL = 0

    def __init__(self, log, loop, proxy_port=DEFAULT_PROXY_PORT):
        """
        Arguments:
            log - logger instance
            loop - asyncio event loop the proxy io loop is bridged onto
            proxy_port - TCP port the proxy listens on
        """
        self._log = log
        self._loop = loop
        self._proxy_port = proxy_port

        # Set by start(); None while the proxy is stopped.
        self._proxy_server = None

    def __repr__(self):
        return "{}(port={})".format(self.__class__.__name__, self._proxy_port)

    def start(self, on_http_request, on_http_response):
        """ Start the proxy server

        Arguments:
            on_http_request - A callback when a http request is initiated
            on_http_response - A callback when a http response is initiated

        """
        self._log.debug("Starting %s", self)
        # Run tornado on top of the tasklet's asyncio loop.
        io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(
                asyncio_loop=self._loop
                )

        self._proxy_server = quickproxy.run_proxy(
                port=self._proxy_port,
                req_callback=on_http_request,
                resp_callback=on_http_response,
                io_loop=io_loop,
                debug_level=QuickProxyServer.DEBUG_LEVEL
                )

    def stop(self):
        """ Stop the proxy server (warns and returns if not running) """
        if self._proxy_server is None:
            # Fix: the format string had a %s placeholder but no argument,
            # which caused a formatting error inside logging at emit time.
            self._log.warning("%s already stopped", self)
            return

        self._log.debug("Stopping %s", self)
        self._proxy_server.stop()
        self._proxy_server = None
+
+
class GlanceHTTPProxyServer(object):
    """ This class implements a HTTP Proxy server

    Proxying requests to glance has the following high-level advantages:
       - Allows us to intercept HTTP requests and responses to hook in functionality
       - Allows us to configure the glance catalog server and keep the endpoint the same
    """

    # A single shared GlanceConfig instance is the default for every
    # GlanceHTTPProxyServer; safe as long as GlanceConfig is treated as
    # read-only.
    DEFAULT_GLANCE_CONFIG = GlanceConfig()

    def __init__(self, log, loop,
                 http_proxy_server,
                 glance_config=DEFAULT_GLANCE_CONFIG,
                 on_create_image_request=None,
                 on_create_image_response=None,
                 ):
        """
        Arguments:
            log - logger instance
            loop - asyncio event loop
            http_proxy_server - the proxy implementation providing
                start(on_http_request, on_http_response) and stop()
                (e.g. QuickProxyServer)
            glance_config - GlanceConfig holding the real glance host/port
            on_create_image_request - optional hook invoked with the parsed
                GlanceImageCreateRequest for each image-create POST
            on_create_image_response - optional hook invoked with the parsed
                GlanceImageCreateResponse and the matching request; its return
                value replaces the proxied response
        """

        self._log = log
        self._loop = loop
        self._http_proxy_server = http_proxy_server
        self._glance_config = glance_config

        self._on_create_image_request = on_create_image_request
        self._on_create_image_response = on_create_image_response

    def _handle_create_image_request(self, request):
        # Parse the glance x-image-meta-* headers into a value object and
        # give the registered hook a chance to observe it.
        image_request = GlanceImageCreateRequest.from_header_dict(request.headers)
        self._log.debug("Parsed image request: %s", image_request)
        if self._on_create_image_request is not None:
            self._on_create_image_request(image_request)

        # Store the GlanceImageCreateRequest in the request context so it
        # is available in the response
        request.context["image_request"] = image_request

        return request

    def _handle_create_image_response(self, response):
        # The request context is propagated to the response by the proxy,
        # so the original create request is retrievable here.
        image_request = response.context["image_request"]

        self._log.debug("Got response body: %s", response.body)
        image_response = GlanceImageCreateResponse.from_response_body(response.body)
        self._log.debug("Parsed image response: %s", image_response)
        if self._on_create_image_response is not None:
            # The hook may return a replacement response object.
            response = self._on_create_image_response(image_response, image_request)

        return response

    def start(self):
        """ Start the glance proxy server """
        def request_callback(request):
            # Redirect the request to the actual glance server
            self._log.debug("Proxying request to glance (path: %s, method: %s)",
                            request.path, request.method)

            # Save the path and method to detect whether the response for
            # for a create_image request
            request.context["path"] = request.path
            request.context["method"] = request.method

            if request.path.endswith("images") and request.method == "POST":
                request = self._handle_create_image_request(request)

            # Redirect the request to the actual glance server
            request.host = self._glance_config.host
            request.port = self._glance_config.port

            return request

        def response_callback(response):
            self._log.debug("Got glance request response: %s", response)

            # Only image-create POSTs get the create-image response handling.
            if response.context["path"].endswith("images") and response.context["method"] == "POST":
                response = self._handle_create_image_response(response)

            return response

        self._http_proxy_server.start(
                on_http_request=request_callback,
                on_http_response=response_callback
                )

    def stop(self):
        """ Stop the glance proxy server """
        self._http_proxy_server.stop()
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/__init__.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/__init__.py
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/__init__.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/__init__.py
new file mode 100644
index 0000000..3192a53
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/__init__.py
@@ -0,0 +1 @@
+from .proxy import *
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/data/test.crt b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/data/test.crt
new file mode 100644
index 0000000..7bd1818
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/data/test.crt
@@ -0,0 +1,13 @@
+-----BEGIN CERTIFICATE-----
+MIICATCCAWoCCQD3Gv0KNbBGNzANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB
+VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0
+cyBQdHkgTHRkMB4XDTE0MDIwMTA4MzYzMloXDTE0MDMwMzA4MzYzMlowRTELMAkG
+A1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0
+IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA60rQ
+dQM2tFLhNXtnlIxoegUw9FM/0DmMXYBKcRNEjJBegBaFO4+LALRsPvrl+eXerYL8
+UeRA7bgO4kkf3HokqWAsjUipTl8UV3RtDePE18m/kPLvuDO2bQMOn+94eqilZyzl
+PU/oUq+3MlwcPLyAldg/7UvkqJcq7R2MiCHv62kCAwEAATANBgkqhkiG9w0BAQUF
+AAOBgQCL/dRFUWuUfB+j4PQ+AorIimkpMsFH+7nOaiRXn1SWaYHu20h6Uxr2Xt5A
+C23lFEpRBVxfNnWbfyM0a8lhJ2/Ri/3cguVeiHJc0r93dyG+FVomRsq8doM1P9KP
+0q2Zbt3iAcuvKdJ6KJO3Zdx8DvHeJlfwymR4PyciLJgiSjJRAg==
+-----END CERTIFICATE-----
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/data/test.key b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/data/test.key
new file mode 100644
index 0000000..80e8579
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/data/test.key
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXgIBAAKBgQDrStB1Aza0UuE1e2eUjGh6BTD0Uz/QOYxdgEpxE0SMkF6AFoU7
+j4sAtGw++uX55d6tgvxR5EDtuA7iSR/ceiSpYCyNSKlOXxRXdG0N48TXyb+Q8u+4
+M7ZtAw6f73h6qKVnLOU9T+hSr7cyXBw8vICV2D/tS+SolyrtHYyIIe/raQIDAQAB
+AoGBAMSOry3RDXX+dpyTBqiV0wF8LLUuhnSQXq4NaiKkEfPK9ubR6WMkOt7P2k2S
+k2P7n9YbQmW25Hax990ZcUBh2RT7MdHpX8bICrS06MOuREgP9ldL5It9/4JpMiJV
+1+9t51TbzywE6dr1E8ROdgYtp65yBgJRzvxooF8YAPTVzJ4xAkEA/hFi1MD6MJgc
+j+dpUiyyO/02wjMGHBql+hqPxdt/cKPHAJEB3sFi8ussy6RFzn4PoVPlAAjRCT2M
+9+QBTJXLdwJBAO0U4EuvsVixZtRy0vCvXbOfkQnVcZupnc7ub3pFvY/rnfOQB4A8
+w7arBYkDeUwZsqpqlMz79wQh0pNgAgEX+B8CQQCYrioOYMn5WfAQKqkIUQPrOQgn
+PDJ3wSvtWPj9liLHtiRpGrtc+ipUgS+yUU4CAY+zC4+arbGxM+P7NHHzbDGRAkBu
+WVEs6VH6nlfmequEK5vJh3PSx+5hLcmuD30DxII/AsQ6IcfcAGx4EZI5+8vxh+SJ
+PaKU5pJK8hM5VW6ZY7axAkEAmLvHaC1cawx85m0azpRXF4JNxkauvXpzeWsAdX5p
+2aX43ke4yjbEA0HfC/8pfkS2ZV9dnIo3nrlFIu8TJPwwMw==
+-----END RSA PRIVATE KEY-----
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/proxy.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/proxy.py
new file mode 100644
index 0000000..7a7d85b
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/proxy.py
@@ -0,0 +1,436 @@
# Standard library
import datetime
import http.cookies as Cookie
import os
import pprint
import sys
import urllib.parse as urlparse
from copy import copy

# Third-party
import dateutil.parser

import tornado.concurrent
import tornado.escape
import tornado.gen
import tornado.httpclient
import tornado.httpserver
import tornado.ioloop
import tornado.iostream
import tornado.web
+
+__all__ = ['run_proxy', 'RequestObj', 'ResponseObj']
+
+DEFAULT_CALLBACK = lambda r: r
+
+
class Bunch(object):
    """A simple attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **kwds):
        for key, value in kwds.items():
            setattr(self, key, value)

    def __str__(self):
        # Render the attribute dict directly.
        return str(vars(self))
+
+
class RequestObj(Bunch):
    '''
    An HTTP request object that contains the following request attributes:

    protocol: either 'http' or 'https'
    host: the destination hostname of the request
    port: the port for the request
    path: the path of the request ('/index.html' for example)
    query: the query string ('?key=value&other=value')
    fragment: the hash fragment ('#fragment')
    method: request method ('GET', 'POST', etc)
    username: always passed as None, but you can set it to override the user
    password: None, but can be set to override the password
    body: request body as a string
    headers: a dictionary of header / value pairs
        (for example {'Content-Type': 'text/plain', 'Content-Length': 200})
    follow_redirects: true to follow redirects before returning a response
    validate_cert: false to turn off SSL cert validation
    context: a dictionary to place data that will be accessible to the response
    '''
    # All attributes come from Bunch keyword arguments; this subclass adds
    # no behavior of its own, only the documented attribute contract above.
    pass
+
+
class ResponseObj(Bunch):
    '''
    An HTTP response object that contains the following request attributes:

    code: response code, such as 200 for 'OK'
    headers: the response headers
    pass_headers: a list or set of headers to pass along in the response. All
        other headers will be stripped out. By default this includes:
        ('Date', 'Cache-Control', 'Server', 'Content-Type', 'Location')
    body: response body as a string
    context: the context object from the request
    '''

    def __init__(self, **kwargs):
        # Fill in any attribute the caller did not supply with its default.
        defaults = dict(
            code=200,
            headers={},
            pass_headers=True,
            body='',
            context={},
        )
        for key, value in defaults.items():
            kwargs.setdefault(key, value)
        super().__init__(**kwargs)
+
+
class ResponseStreamWriterFuture(tornado.concurrent.Future):
    # A tornado Future that also carries the chunk-writer function for a
    # streaming (chunked) proxy request body.  ProxyHandler.data_received()
    # awaits write_fn(chunk) to forward each incoming chunk upstream.
    def __init__(self, write_fn, *args, **kwargs):
        self.write_fn = write_fn
        super().__init__(*args, **kwargs)
+
+
+def _make_proxy(methods, io_loop, req_callback, resp_callback, err_callback, debug_level=0):
+
+    @tornado.web.stream_request_body
+    class ProxyHandler(tornado.web.RequestHandler):
+
+        SUPPORTED_METHODS = methods
+
+        def initialize(self):
+            self.proxy_request_ready = tornado.concurrent.Future()
+            self.request_future = None
+
+        def on_connection_close(self):
+            if self.request_future is not None:
+                self.request_future.set_result(False)
+
+        def create_body_producer_future(self, write_fn):
+            self.request_future = ResponseStreamWriterFuture(write_fn)
+            self.proxy_request_ready.set_result(True)
+            return self.request_future
+
+        @tornado.gen.coroutine
+        def data_received(self, chunk):
+            yield self.proxy_request_ready
+
+            yield self.request_future.write_fn(chunk)
+
+        def make_requestobj(self, request):
+            '''
+            creates a request object for this request
+            '''
+
+            # get url for request
+            # surprisingly, tornado's HTTPRequest sometimes
+            # has a uri field with the full uri (http://...)
+            # and sometimes it just contains the path. :(
+
+            url = request.uri
+            if not url.startswith(u'http'):
+                url = u"{proto}://{netloc}{path}".format(
+                    proto=request.protocol,
+                    netloc=request.host,
+                    path=request.uri
+                )
+
+            parsedurl = urlparse.urlparse(url)
+
+            # Passing on the transfer encoding header, causes Tornado to not
+            # transmit valid chunks
+            headers = request.headers.copy()
+            if "Transfer-encoding" in headers:
+                del headers["Transfer-Encoding"]
+
+            # create request object
+
+            requestobj = RequestObj(
+                method=request.method,
+                protocol=parsedurl.scheme,
+                username=None,
+                password=None,
+                host=parsedurl.hostname,
+                port=parsedurl.port or 80,
+                path=parsedurl.path,
+                query=parsedurl.query,
+                fragment=parsedurl.fragment,
+                #body=request.body,
+                headers=headers,
+                follow_redirects=False,
+                validate_cert=True,
+                context={}
+            )
+
+            return requestobj, parsedurl
+
+
+        def make_request(self, obj, parsedurl):
+            '''
+            converts a request object into an HTTPRequest
+            '''
+
+            obj.headers.setdefault('Host', obj.host)
+
+            if obj.username or parsedurl.username or \
+                obj.password or parsedurl.password:
+
+                auth = u"{username}:{password}@".format(
+                    username=obj.username or parsedurl.username,
+                    password=obj.password or parsedurl.password
+                )
+
+            else:
+                auth = ''
+
+            url = u"{proto}://{auth}{host}{port}{path}{query}{frag}"
+            url = url.format(
+                proto=obj.protocol,
+                auth=auth,
+                host=obj.host,
+                port=(u':' + str(obj.port)) if (obj.port and obj.port != 80) else u'',
+                path=u'/'+obj.path.lstrip(u'/') if obj.path else u'',
+                query=u'?'+obj.query.lstrip(u'?') if obj.query else u'',
+                frag=obj.fragment
+            )
+
+            body_producer = None
+            if "Transfer-encoding" in self.request.headers and \
+                    self.request.headers["Transfer-Encoding"] == "chunked":
+                body_producer = self.create_body_producer_future
+
+            req = tornado.httpclient.HTTPRequest(
+                url=url,
+                method=obj.method,
+                body_producer=body_producer,
+                decompress_response=False,
+                headers=obj.headers,
+                follow_redirects=obj.follow_redirects,
+                allow_nonstandard_methods=True,
+                request_timeout=1*60*60 #1 hour
+            )
+
+            return req
+
        def prepare(self):
            """Entry point for every proxied request.

            Builds a RequestObj from the inbound tornado request, lets the
            req_callback inspect/modify it (or short-circuit with a
            ResponseObj), then forwards the request upstream with an async
            HTTP client.  The response is delivered to handle_response().
            """

            request = self.request
            if debug_level >= 4:
                print("<<<<<<<< REQUEST <<<<<<<<")
                pprint.pprint(request.__dict__)

            MB = 1024 * 1024
            GB = 1024 * MB

            # Raise tornado's default body-size cap so very large image
            # uploads can be streamed through the proxy.
            MAX_STREAMED_SIZE = 50 * GB
            request.connection.set_max_body_size(MAX_STREAMED_SIZE)

            requestobj, parsedurl = self.make_requestobj(request)

            if debug_level >= 3:
                print("<<<<<<<< REQUESTOBJ <<<<<<<<")
                pprint.pprint(requestobj.__dict__)

            if debug_level >= 1:
                debugstr = "serving request from %s:%d%s " % (requestobj.host,
                                                              requestobj.port or 80,
                                                              requestobj.path)

            # Give the registered callback a chance to rewrite the request.
            modrequestobj = req_callback(requestobj)

            # The callback may answer directly with a ResponseObj instead of
            # forwarding upstream.
            if isinstance(modrequestobj, ResponseObj):
                self.handle_response(modrequestobj)
                return

            if debug_level >= 1:
                print(debugstr + "to %s:%d%s" % (modrequestobj.host,
                                                 modrequestobj.port or 80,
                                                 modrequestobj.path))

            outreq = self.make_request(modrequestobj, parsedurl)

            if debug_level >= 2:
                print(">>>>>>>> REQUEST >>>>>>>>")
                print("%s %s" % (outreq.method, outreq.url))
                for k, v in outreq.headers.items():
                    print( "%s: %s" % (k, v))

            # send the request

            def _resp_callback(response):
                self.handle_response(response, context=modrequestobj.context)

            client = tornado.httpclient.AsyncHTTPClient(io_loop=io_loop)
            # NOTE(review): with a callback argument, AsyncHTTPClient.fetch
            # normally reports failures through the response object rather
            # than by raising, so this except clause likely only covers
            # immediate argument/setup errors -- confirm against the tornado
            # version in use.
            try:
                client.fetch(outreq, _resp_callback,
                             validate_cert=modrequestobj.validate_cert)
            except tornado.httpclient.HTTPError as e:
                if hasattr(e, 'response') and e.response:
                    self.handle_response(e.response,
                                         context=modrequestobj.context,
                                         error=True)
                else:
                    self.set_status(500)
                    self.write('Internal server error:\n' + str(e))
                    self.finish()
+
+
+        def handle_response(self, response, context={}, error=False):
+
+            if not isinstance(response, ResponseObj):
+                if debug_level >= 4:
+                    print("<<<<<<<< RESPONSE <<<<<<<")
+                    pprint.pprint(response.__dict__)
+
+                responseobj = ResponseObj(
+                    code=response.code,
+                    headers=response.headers,
+                    pass_headers=True,
+                    body=response.body,
+                    context=context,
+                )
+            else:
+                responseobj = response
+
+            if debug_level >= 3:
+                print("<<<<<<<< RESPONSEOBJ <<<<<<<")
+                responseprint = copy(responseobj)
+                responseprint.body = "-- body content not displayed --"
+                pprint.pprint(responseprint.__dict__)
+
+            if not error:
+                mod = resp_callback(responseobj)
+            else:
+                mod = err_callback(responseobj)
+
+            # set the response status code
+
+            if mod.code == 599:
+                self.set_status(500)
+                self.write('Internal server error. Server unreachable.')
+                self.finish()
+                return
+
+            self.set_status(mod.code)
+
+            # set the response headers
+
+            if type(mod.pass_headers) == bool:
+                header_keys = mod.headers.keys() if mod.pass_headers else []
+            else:
+                header_keys = mod.pass_headers
+            for key in header_keys:
+                if key.lower() == "set-cookie":
+                    cookies = Cookie.BaseCookie()
+                    cookies.load(tornado.escape.native_str(mod.headers.get(key)))
+                    for cookie_key in cookies:
+                        cookie = cookies[cookie_key]
+                        params = dict(cookie)
+                        expires = params.pop('expires', None)
+                        if expires:
+                            expires = dateutil.parser.parse(expires)
+                        self.set_cookie(
+                            cookie.key,
+                            cookie.value,
+                            expires = expires,
+                            **params
+                        )
+                else:
+                    val = mod.headers.get(key)
+                    self.set_header(key, val)
+
+            if debug_level >= 2:
+                print(">>>>>>>> RESPONSE (%s) >>>>>>>" % mod.code)
+                for k, v in self._headers.items():
+                    print("%s: %s" % (k, v))
+                if hasattr(self, '_new_cookie'):
+                    print(self._new_cookie.output())
+
+            # set the response body
+
+            if mod.body:
+                self.write(mod.body)
+
+            self.finish()
+
        # HTTP verb entry points.  All real work happens in prepare();
        # body-less methods (GET/OPTIONS/HEAD/DELETE) need no further
        # action, while body-carrying methods (PUT/PATCH/POST) resolve
        # request_future so the chunked body producer starts streaming.
        @tornado.web.asynchronous
        def get(self):
            pass

        @tornado.web.asynchronous
        def options(self):
            pass

        @tornado.web.asynchronous
        def head(self):
            pass

        @tornado.web.asynchronous
        def put(self):
            self.request_future.set_result(True)

        @tornado.web.asynchronous
        def patch(self):
            self.request_future.set_result(True)

        @tornado.web.asynchronous
        def post(self):
            self.request_future.set_result(True)

        @tornado.web.asynchronous
        def delete(self):
            pass
+
+
+    return ProxyHandler
+
+
def run_proxy(port,
              methods=['GET', 'POST', 'PUT', 'DELETE', 'HEAD'],
              req_callback=DEFAULT_CALLBACK,
              resp_callback=DEFAULT_CALLBACK,
              err_callback=DEFAULT_CALLBACK,
              test_ssl=False,
              debug_level=0,
              io_loop=None,
              ):

    """
    Create and bind the proxy's HTTP server on the specified port.

    Note: this function does NOT start the tornado IOLoop; the caller is
    responsible for running it (e.g. io_loop.start()).

    port: TCP port to listen on
    methods: the HTTP methods this proxy will support
    req_callback: a callback that is passed a RequestObj that it should
        modify and then return
    resp_callback: a callback that is given a ResponseObj that it should
        modify and then return
    err_callback: in the case of an error, this callback will be called.
        there's no difference between how this and the resp_callback are
        used.
    test_ssl: if true, will wrap the socket in a self-signed ssl cert
    debug_level: 0 no debug, 1 basic, 2 verbose, 3 request/response
        objects, 4 raw request dumps
    io_loop: the tornado IOLoop to attach to; defaults to the singleton
        IOLoop instance

    Returns the tornado HTTPServer instance (already listening).
    """

    io_loop = tornado.ioloop.IOLoop.instance() if io_loop is None else io_loop

    # Route every path to a ProxyHandler bound to the given callbacks.
    app = tornado.web.Application([
        (r'.*', _make_proxy(methods=methods,
                            io_loop=io_loop,
                            req_callback=req_callback,
                            resp_callback=resp_callback,
                            err_callback=err_callback,
                            debug_level=debug_level)),
    ])

    # For SSL testing, serve with the self-signed cert shipped in ./data.
    if test_ssl:
        this_dir, this_filename = os.path.split(__file__)
        kwargs = {
            "ssl_options": {
                "certfile": os.path.join(this_dir, "data", "test.crt"),
                "keyfile": os.path.join(this_dir, "data", "test.key"),
            },
            "io_loop": io_loop,
        }
    else:
        kwargs = {"io_loop": io_loop}

    http_server = tornado.httpserver.HTTPServer(app, **kwargs)
    http_server.listen(port)
    return http_server
+
+
if __name__ == '__main__':
    # Standalone entry point: run the proxy with default (pass-through)
    # callbacks on the given port (default 8888).
    port = 8888
    if len(sys.argv) > 1:
        port = int(sys.argv[1])

    print("Starting HTTP proxy on port %d" % port)
    run_proxy(port)
    # Fix: run_proxy only binds the server; without starting the IOLoop
    # the process would exit immediately and serve nothing.
    tornado.ioloop.IOLoop.instance().start()
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/tasklet.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/tasklet.py
new file mode 100644
index 0000000..027e582
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/tasklet.py
@@ -0,0 +1,535 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import os
+import threading
+import time
+
+import rift.tasklets
+import rift.mano.cloud
+
+from . import glance_proxy_server
+from . import glance_client
+from . import upload
+
+import gi
+gi.require_version('RwImageMgmtYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwDts', '1.0')
+
+from gi.repository import (
+    RwcalYang,
+    RwDts as rwdts,
+    RwImageMgmtYang,
+    RwLaunchpadYang,
+)
+
+
class ImageRequestError(Exception):
    """Base error for image upload-job RPC requests."""
    pass


class AccountNotFoundError(ImageRequestError):
    """A named cloud account is not configured."""
    pass


class ImageNotFoundError(ImageRequestError):
    """No matching image exists in the image catalog."""
    pass
+
+
class CloudAccountDtsHandler(object):
    """Thin wrapper that subscribes to cloud-account configuration via DTS
    and forwards add/delete apply events to the provided callbacks."""

    def __init__(self, log, dts, log_hdl):
        self._dts = dts
        self._log = log
        self._log_hdl = log_hdl
        # created lazily in register()
        self._cloud_cfg_subscriber = None

    def register(self, on_add_apply, on_delete_apply):
        """Create and register the cloud-account config subscriber.

        Arguments:
            on_add_apply - called when a cloud account config is applied
            on_delete_apply - called when a cloud account config is removed
        """
        self._log.debug("creating cloud account config handler")
        self._cloud_cfg_subscriber = rift.mano.cloud.CloudAccountConfigSubscriber(
                self._dts, self._log, self._log_hdl,
                rift.mano.cloud.CloudAccountConfigCallbacks(
                    on_add_apply=on_add_apply,
                    on_delete_apply=on_delete_apply,
                    )
                )
        self._cloud_cfg_subscriber.register()
+
+
def openstack_image_to_image_info(openstack_image):
    """Convert the OpenstackImage to a ImageInfo protobuf message

    Arguments:
        openstack_image - A OpenstackImage instance

    Returns:
        A ImageInfo CAL protobuf message
    """

    image_info = RwcalYang.ImageInfoItem()

    # Mirror the directly-transferable attributes onto the protobuf.
    for attr in ("id", "name", "checksum", "container_format", "disk_format"):
        setattr(image_info, attr, getattr(openstack_image, attr))

    # The catalog's "status" maps to the CAL message's "state" field.
    image_info.state = openstack_image.status

    return image_info
+
+
class ImageDTSShowHandler(object):
    """ A DTS publisher for the upload-jobs data container """
    def __init__(self, log, loop, dts, job_controller):
        self._log = log
        self._loop = loop
        self._dts = dts
        # source of the published upload-jobs protobuf state
        self._job_controller = job_controller

        # DTS registration handle, set in register()
        self._subscriber = None

    @asyncio.coroutine
    def register(self):
        """ Register as a publisher and wait for reg_ready to complete """
        def get_xpath():
            return "D,/rw-image-mgmt:upload-jobs"

        @asyncio.coroutine
        def on_prepare(xact_info, action, ks_path, msg):
            # This publisher is read-only: NACK anything but READ queries.
            if action != rwdts.QueryAction.READ:
                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
                return

            # Snapshot the current job state as a protobuf message.
            jobs_pb_msg = self._job_controller.pb_msg

            xact_info.respond_xpath(
                    rwdts.XactRspCode.ACK,
                    xpath=get_xpath(),
                    msg=jobs_pb_msg,
                    )

        # Used to block register() until DTS signals the registration is
        # ready for use.
        reg_event = asyncio.Event(loop=self._loop)

        @asyncio.coroutine
        def on_ready(regh, status):
            reg_event.set()

        self._subscriber = yield from self._dts.register(
                xpath=get_xpath(),
                handler=rift.tasklets.DTS.RegistrationHandler(
                    on_prepare=on_prepare,
                    on_ready=on_ready,
                    ),
                flags=rwdts.Flag.PUBLISHER,
                )

        yield from reg_event.wait()
+
+
class ImageDTSRPCHandler(object):
    """ A DTS publisher for the upload-job RPC's """
    def __init__(self, log, loop, dts, accounts, glance_client, upload_task_creator, job_controller):
        self._log = log
        self._loop = loop
        self._dts = dts
        # account_name -> configured cloud account
        self._accounts = accounts
        self._glance_client = glance_client
        self._upload_task_creator = upload_task_creator
        self._job_controller = job_controller

        # DTS registration handle, set during register()
        self._subscriber = None

    @asyncio.coroutine
    def _register_create_upload_job(self):
        """Register the create-upload-job RPC with DTS."""
        def get_xpath():
            return "/rw-image-mgmt:create-upload-job"

        @asyncio.coroutine
        def on_prepare(xact_info, action, ks_path, msg):
            create_msg = msg

            account_names = create_msg.cloud_account
            # If cloud accounts were not specified, upload image to all cloud account
            if not account_names:
                account_names = list(self._accounts.keys())

            for account_name in account_names:
                if account_name not in self._accounts:
                    # Fix: interpolate the account name into the message
                    # instead of passing it as a stray exception argument.
                    raise AccountNotFoundError(
                            "Could not find account %s" % account_name
                            )

            if create_msg.has_field("external_url"):
                # Pull the image from the external URL into the local
                # glance catalog, then fan upload tasks out to each account.
                glance_image = yield from self._upload_task_creator.create_glance_image_from_url_create_rpc(
                        account_names, create_msg.external_url
                        )

                tasks = yield from self._upload_task_creator.create_tasks_from_glance_id(
                    account_names, glance_image.id
                    )

                def delete_image(ft):
                    # Best-effort cleanup of the temporary catalog image.
                    try:
                        self._glance_client.delete_image_from_id(glance_image.id)
                    except glance_client.OpenstackImageDeleteError:
                        pass

                # Create a job and when the job completes delete the temporary
                # image from the catalog.
                job_id = self._job_controller.create_job(
                        tasks,
                        on_completed=delete_image
                        )

            elif create_msg.has_field("onboarded_image"):
                tasks = yield from self._upload_task_creator.create_tasks_from_onboarded_create_rpc(
                    account_names, create_msg.onboarded_image
                    )
                job_id = self._job_controller.create_job(tasks)

            else:
                raise ImageRequestError("an image selection must be provided")

            rpc_out_msg = RwImageMgmtYang.CreateUploadJobOutput(job_id=job_id)

            xact_info.respond_xpath(
                    rwdts.XactRspCode.ACK,
                    xpath="O," + get_xpath(),
                    msg=rpc_out_msg,
                    )

        reg_event = asyncio.Event(loop=self._loop)

        @asyncio.coroutine
        def on_ready(_, status):
            reg_event.set()

        self._subscriber = yield from self._dts.register(
                xpath="I," + get_xpath(),
                handler=rift.tasklets.DTS.RegistrationHandler(
                    on_prepare=on_prepare,
                    on_ready=on_ready,
                    ),
                flags=rwdts.Flag.PUBLISHER,
                )

        yield from reg_event.wait()

    @asyncio.coroutine
    def _register_cancel_upload_job(self):
        """Register the cancel-upload-job RPC with DTS."""
        def get_xpath():
            return "/rw-image-mgmt:cancel-upload-job"

        @asyncio.coroutine
        def on_prepare(xact_info, action, ks_path, msg):
            # job-id is mandatory for a cancel request.
            if not msg.has_field("job_id"):
                self._log.error("cancel-upload-job missing job-id field.")
                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
                return

            job_id = msg.job_id

            job = self._job_controller.get_job(job_id)
            job.stop()

            xact_info.respond_xpath(
                    rwdts.XactRspCode.ACK,
                    xpath="O," + get_xpath(),
                    )

        reg_event = asyncio.Event(loop=self._loop)

        @asyncio.coroutine
        def on_ready(_, status):
            reg_event.set()

        self._subscriber = yield from self._dts.register(
                xpath="I," + get_xpath(),
                handler=rift.tasklets.DTS.RegistrationHandler(
                    on_prepare=on_prepare,
                    on_ready=on_ready,
                    ),
                flags=rwdts.Flag.PUBLISHER,
                )

        yield from reg_event.wait()

    @asyncio.coroutine
    def register(self):
        """ Register for RPC's and wait for all registrations to complete """
        yield from self._register_create_upload_job()
        yield from self._register_cancel_upload_job()
+
+
class GlanceClientUploadTaskCreator(object):
    """ This class creates upload tasks using configured cloud accounts and
    configured image catalog glance client """

    def __init__(self, log, loop, accounts, glance_client):
        self._log = log
        self._loop = loop
        # account_name -> configured cloud account
        self._accounts = accounts
        self._glance_client = glance_client

    @asyncio.coroutine
    def create_tasks(self, account_names, image_id=None, image_name=None, image_checksum=None):
        """ Create a list of UploadTasks for a list of cloud accounts
        and a image with a matching image_name and image_checksum in the
        catalog

        Arguments:
            account_names - A list of configured cloud account names
            image_id - A image id
            image_name - A image name
            image_checksum - A image checksum

        Returns:
            A list of AccountImageUploadTask instances

        Raises:
            ImageNotFoundError - Could not find a matching image in the
                image catalog

            AccountNotFoundError - Could not find an account that matched
                the provided account name
        """
        # Look up the image in the catalog off-loop (the glance client is
        # blocking); bound the wait so an unresponsive catalog fails fast.
        try:
            image = yield from asyncio.wait_for(
                    self._loop.run_in_executor(
                            None,
                            self._glance_client.find_active_image,
                            image_id,
                            image_name,
                            image_checksum,
                            ),
                    timeout=5,
                    loop=self._loop,
                    )

        except glance_client.OpenstackImageError as e:
            msg = "Could not find image in Openstack to upload"
            self._log.exception(msg)
            raise ImageNotFoundError(msg) from e

        image_info = openstack_image_to_image_info(image)
        self._log.debug("created image info: %s", image_info)

        tasks = []
        # Validate every account up front so no pipes are created when
        # any requested account is unknown.
        for account_name in account_names:
            if account_name not in self._accounts:
                # Fix: interpolate the account name into the message
                # instead of passing it as a stray exception argument.
                raise AccountNotFoundError(
                        "Could not find account %s" % account_name
                        )

        # For each account name provided, create a pipe (GlanceImagePipeGen)
        # which feeds data into the UploadTask while also monitoring the various
        # transmit stats (progress, bytes written, bytes per second, etc)
        for account_name in account_names:
            account = self._accounts[account_name]
            self._log.debug("creating task for account %s", account.name)
            glance_data_gen = self._glance_client.get_image_data(image_info.id)

            pipe_gen = upload.GlanceImagePipeGen(self._log, self._loop, glance_data_gen)
            progress_pipe = upload.UploadProgressWriteProxy(
                    self._log, self._loop, image.size, pipe_gen.write_hdl
                    )
            progress_pipe.start_rate_monitoring()
            pipe_gen.write_hdl = progress_pipe
            pipe_gen.start()

            task = upload.AccountImageUploadTask(
                    self._log, self._loop, account, image_info, pipe_gen.read_hdl,
                    progress_info=progress_pipe, write_canceller=pipe_gen,
                    )
            tasks.append(task)
            self._log.debug("task created: %s", task)

        return tasks

    @asyncio.coroutine
    def create_glance_image_from_url_create_rpc(self, account_names, create_msg):
        """Create a catalog image from the RPC's external_url sub-message.

        Raises ValueError/ImageRequestError when the message is missing
        required fields or mixes image_url with image_id.
        """
        if "image_url" not in create_msg:
            raise ValueError("image_url must be specified")

        if "image_id" in create_msg:
            raise ImageRequestError("Cannot specify both image_url and image_id")

        if "image_name" not in create_msg:
            raise ImageRequestError("image_name must be specified when image_url is provided")

        # Blocking glance call moved off-loop; optional fields default to
        # None when absent from the message.
        glance_image = yield from asyncio.wait_for(
                self._loop.run_in_executor(
                    None,
                    self._glance_client.create_image_from_url,
                    create_msg.image_url,
                    create_msg.image_name,
                    create_msg.image_checksum if "image_checksum" in create_msg else None,
                    create_msg.disk_format if "disk_format" in create_msg else None,
                    create_msg.container_format if "container_format" in create_msg else None,
                    ),
                timeout=5,
                loop=self._loop,
                )

        return glance_image

    @asyncio.coroutine
    def create_tasks_from_glance_id(self, account_names, glance_image_id):
        """Create upload tasks for an image identified by its glance id."""
        return (yield from self.create_tasks(account_names, glance_image_id))

    @asyncio.coroutine
    def create_tasks_from_onboarded_create_rpc(self, account_names, create_msg):
        """Create upload tasks from the RPC's onboarded_image sub-message."""
        return (yield from self.create_tasks(
            account_names,
            create_msg.image_id if "image_id" in create_msg else None,
            create_msg.image_name if "image_name" in create_msg else None,
            create_msg.image_checksum if "image_checksum" in create_msg else None)
            )
+
+
class ImageManagerTasklet(rift.tasklets.Tasklet):
    """
    The RwImageMgrTasklet provides a interface for DTS to interact with an
    instance of the Monitor class. This allows the Monitor class to remain
    independent of DTS.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.rwlog.set_category("rw-mano-log")

        # All collaborators are created in init(); declared here so the
        # object shape is stable.
        self.cloud_cfg_subscriber = None
        self.http_proxy = None
        self.proxy_server = None
        self.dts = None
        self.job_controller = None
        self.cloud_accounts = {}
        self.glance_client = None
        self.task_creator = None
        self.rpc_handler = None
        self.show_handler = None

    def start(self):
        """Start the tasklet and create the DTS API handle."""
        super().start()
        self.log.info("Starting Image Manager Tasklet")

        self.log.debug("Registering with dts")
        self.dts = rift.tasklets.DTS(
                self.tasklet_info,
                RwImageMgmtYang.get_schema(),
                self.loop,
                self.on_dts_state_change
                )

        self.log.debug("Created DTS Api GI Object: %s", self.dts)

    def stop(self):
        """Tear down the DTS handle; never let shutdown raise."""
        try:
            self.dts.deinit()
        except Exception as e:
            self.log.exception(e)

    @asyncio.coroutine
    def init(self):
        """Create and register all handlers once DTS reaches INIT state."""
        try:
            self.log.debug("creating cloud account handler")
            self.cloud_cfg_subscriber = CloudAccountDtsHandler(self.log, self.dts, self.log_hdl)
            self.cloud_cfg_subscriber.register(
                    self.on_cloud_account_create,
                    self.on_cloud_account_delete
                    )

            self.log.debug("creating http proxy server")

            self.http_proxy = glance_proxy_server.QuickProxyServer(self.log, self.loop)

            self.proxy_server = glance_proxy_server.GlanceHTTPProxyServer(
                    self.log, self.loop, self.http_proxy
                    )
            self.proxy_server.start()

            self.job_controller = upload.ImageUploadJobController(
                    self.log, self.loop
                    )

            # NOTE(review): glance endpoint and token are hardcoded here
            # (localhost:9292, "test") -- confirm whether these should come
            # from configuration.
            self.glance_client = glance_client.OpenstackGlanceClient.from_token(
                    self.log, "127.0.0.1", "9292", "test"
                    )

            self.task_creator = GlanceClientUploadTaskCreator(
                    self.log, self.loop, self.cloud_accounts, self.glance_client
                    )

            self.rpc_handler = ImageDTSRPCHandler(
                    self.log, self.loop, self.dts, self.cloud_accounts, self.glance_client, self.task_creator,
                    self.job_controller
                    )
            yield from self.rpc_handler.register()

            self.show_handler = ImageDTSShowHandler(
                    self.log, self.loop, self.dts, self.job_controller
                    )
            yield from self.show_handler.register()

        except Exception:
            self.log.exception("error during init")

    def on_cloud_account_create(self, account):
        """Track a newly-configured cloud account."""
        self.log.debug("adding cloud account: %s", account.name)
        self.cloud_accounts[account.name] = account

    def on_cloud_account_delete(self, account_name):
        """Stop tracking a deleted cloud account."""
        self.log.debug("deleting cloud account: %s", account_name)
        if account_name not in self.cloud_accounts:
            # Fix: the original fell through to the del below and raised
            # KeyError right after warning the account was absent.
            self.log.warning("cloud account not found: %s", account_name)
            return

        del self.cloud_accounts[account_name]

    @asyncio.coroutine
    def run(self):
        pass

    def on_instance_started(self):
        self.log.debug("Got instance started callback")

    @asyncio.coroutine
    def on_dts_state_change(self, state):
        """Handle DTS state change

        Take action according to current DTS state to transition application
        into the corresponding application state

        Arguments
            state - current dts state

        """
        # DTS state -> next DTS state
        switch = {
            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
            rwdts.State.CONFIG: rwdts.State.RUN,
        }

        # DTS state -> coroutine to run on entering that state
        handlers = {
            rwdts.State.INIT: self.init,
            rwdts.State.RUN: self.run,
        }

        # Transition application to next state
        handler = handlers.get(state, None)
        if handler is not None:
            yield from handler()

        # Transition dts to next state
        next_state = switch.get(state, None)
        if next_state is not None:
            self.dts.handle.set_state(next_state)
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py
new file mode 100644
index 0000000..7ce74b2
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py
@@ -0,0 +1,709 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import collections
+import itertools
+import os
+import time
+import threading
+
+import rift.mano.cloud
+
+import gi
+gi.require_version('RwImageMgmtYang', '1.0')
+from gi.repository import (
+        RwImageMgmtYang,
+        )
+
+
+class UploadJobError(Exception):
+    """ Raised when an ImageUploadJob is used in an invalid state. """
+    pass
+
+
+class ImageUploadTaskError(Exception):
+    """ Base class for errors raised by AccountImageUploadTask. """
+    pass
+
+
+class ImageUploadError(ImageUploadTaskError):
+    """ Raised when uploading an image to a cloud account fails. """
+    pass
+
+
+class ImageListError(ImageUploadTaskError):
+    """ Raised when the image list for a cloud account cannot be retrieved. """
+    pass
+
+
+class ImageUploadJobController(object):
+    """ This class starts and manages ImageUploadJobs """
+    # Maximum number of completed jobs retained for status queries before the
+    # oldest completed job is dropped from tracking.
+    MAX_COMPLETED_JOBS = 20
+
+    def __init__(self, log, loop, max_completed_jobs=MAX_COMPLETED_JOBS):
+        """
+        Arguments:
+            log - logger
+            loop - asyncio event loop
+            max_completed_jobs - number of completed jobs to keep tracked
+        """
+        self._log = log
+        self._loop = loop
+        self._job_id_gen = itertools.count(1)
+        self._max_completed_jobs = max_completed_jobs
+
+        # All tracked jobs (active and completed), keyed by job id.
+        self._jobs = {}
+        # Bounded history of completed jobs; newest is appended on the left.
+        self._completed_jobs = collections.deque(
+                maxlen=self._max_completed_jobs
+                )
+
+    @property
+    def pb_msg(self):
+        """ the UploadJobs protobuf message """
+        upload_jobs_msg = RwImageMgmtYang.UploadJobs()
+        for job in self._jobs.values():
+            upload_jobs_msg.job.append(job.pb_msg)
+
+        return upload_jobs_msg
+
+    @property
+    def jobs(self):
+        """ the tracked list of ImageUploadJobs """
+        return self._jobs.values()
+
+    @property
+    def completed_jobs(self):
+        """ completed jobs in the tracked list of ImageUploadJobs """
+        return [job for job in self._jobs.values() if job in self._completed_jobs]
+
+    @property
+    def active_jobs(self):
+        """ in-progress jobs in the tracked list of ImageUploadJobs """
+        return [job for job in self._jobs.values() if job not in self._completed_jobs]
+
+    def _add_job(self, job):
+        # Track the job by its id.
+        self._jobs[job.id] = job
+
+    def _start_job(self, job, on_completed=None):
+        """ Start a tracked job and arrange completion bookkeeping.
+
+        Arguments:
+            job - the ImageUploadJob to start
+            on_completed - optional done-callback added to the job future
+        """
+        def on_job_completed(_):
+            self._log.debug("%s completed.  Adding to completed jobs list.", job)
+
+            # If adding a new completed job is going to overflow the
+            # completed job list, find the first job that completed and
+            # remove it from the tracked jobs.
+            # (appendleft puts the newest at index 0, so index -1 is the
+            # oldest completed job; the bounded deque then discards it.)
+            if len(self._completed_jobs) == self._completed_jobs.maxlen:
+                first_completed_job = self._completed_jobs[-1]
+                del self._jobs[first_completed_job.id]
+
+            self._completed_jobs.appendleft(job)
+
+        job_future = job.start()
+        job_future.add_done_callback(on_job_completed)
+
+        if on_completed is not None:
+            job_future.add_done_callback(on_completed)
+
+    def get_job(self, job_id):
+        """ Get the UploadJob from the job id
+
+        Arguments:
+            job_id - the job id that was previously added to the controller
+
+        Returns:
+            The associated ImageUploadJob
+
+        Raises:
+            LookupError - Could not find the job id
+        """
+        if job_id not in self._jobs:
+            raise LookupError("Could not find job_id %s" % job_id)
+
+        return self._jobs[job_id]
+
+    def create_job(self, image_tasks, on_completed=None):
+        """ Create and start a ImageUploadJob from a list of ImageUploadTasks
+
+        Arguments:
+            image_tasks - a list of ImageUploadTasks
+            on_completed - a callback which is added to the job future
+
+        Returns:
+            A ImageUploadJob id
+        """
+        self._log.debug("Creating new job from %s image tasks", len(image_tasks))
+        new_job = ImageUploadJob(
+                self._log,
+                self._loop,
+                image_tasks,
+                job_id=next(self._job_id_gen)
+                )
+
+        self._add_job(new_job)
+        self._start_job(new_job, on_completed=on_completed)
+
+        return new_job.id
+
+
+class ImageUploadJob(object):
+    """ This class manages a set of ImageUploadTasks
+
+    In order to push an image (or set of images) to many cloud accounts, and get a single
+    status on that operation, we need a single status that represents all of those tasks.
+
+    The ImageUploadJob provides a single endpoint to control all the tasks and report
+    when all images are successfully upload or when any one fails.
+    """
+    # Valid states, in strictly increasing order (the state setter asserts
+    # monotonic progression through this tuple).
+    STATES = ("QUEUED", "IN_PROGRESS", "CANCELLING", "CANCELLED", "COMPLETED", "FAILED")
+    TIMEOUT_JOB = 6 * 60 * 60  # 6 hours
+    # Fallback job-id generator used when no explicit job_id is supplied.
+    JOB_GEN = itertools.count(1)
+
+    def __init__(self, log, loop, upload_tasks, job_id=None, timeout_job=TIMEOUT_JOB):
+        """
+        Arguments:
+            log - logger
+            loop - asyncio event loop
+            upload_tasks - list of tasks (AccountImageUploadTask-like) to run
+            job_id - optional explicit job id; auto-generated when None
+            timeout_job - seconds to wait for all tasks before timing out
+        """
+        self._log = log
+        self._loop = loop
+        self._upload_tasks = upload_tasks
+        self._job_id = next(ImageUploadJob.JOB_GEN) if job_id is None else job_id
+        self._timeout_job = timeout_job
+
+        self._state = "QUEUED"
+        self._state_stack = [self._state]
+
+        self._start_time = time.time()
+        self._stop_time = 0
+
+        self._task_future_map = {}
+        self._job_future = None
+
+    def __repr__(self):
+        return "{}(job_id={}, state={})".format(
+                self.__class__.__name__, self._job_id, self._state
+                )
+
+    @property
+    def id(self):
+        # The job identifier assigned at construction.
+        return self._job_id
+
+    @property
+    def state(self):
+        """ The state of the ImageUploadJob """
+        return self._state
+
+    @state.setter
+    def state(self, new_state):
+        """ Set the state of the ImageUploadJob """
+        states = ImageUploadJob.STATES
+        assert new_state in states
+        # States may only move forward through the STATES tuple.
+        assert states.index(new_state) >= states.index(self._state)
+        self._state_stack.append(new_state)
+
+        self._state = new_state
+
+    @property
+    def state_stack(self):
+        """ The list of states that this job progressed through  """
+        return self._state_stack
+
+    @property
+    def pb_msg(self):
+        """ The UploadJob protobuf message """
+        task = RwImageMgmtYang.UploadJob.from_dict({
+            "id": self._job_id,
+            "status": self._state,
+            "start_time": self._start_time,
+            "upload_tasks": [task.pb_msg for task in self._upload_tasks]
+        })
+
+        # stop_time is only populated once the job has reached a terminal state.
+        if self._stop_time:
+            task.stop_time = self._stop_time
+
+        return task
+
+    def _start_upload_tasks(self):
+        # Kick off every child upload task.
+        self._log.debug("Starting %s upload tasks", len(self._upload_tasks))
+
+        for upload_task in self._upload_tasks:
+            upload_task.start()
+
+    @asyncio.coroutine
+    def _wait_for_upload_tasks(self):
+        # Block until all child tasks finish or the job timeout elapses.
+        self._log.debug("Waiting for upload tasks to complete")
+
+        wait_coroutines = [t.wait() for t in self._upload_tasks]
+        if wait_coroutines:
+            yield from asyncio.wait(
+                    wait_coroutines,
+                    timeout=self._timeout_job,
+                    loop=self._loop
+                    )
+
+        self._log.debug("All upload tasks completed")
+
+    def _set_final_job_state(self):
+        # The job is FAILED if any child task did not reach COMPLETED.
+        failed_tasks = []
+        for task in self._upload_tasks:
+            if task.state != "COMPLETED":
+                failed_tasks.append(task)
+
+        if failed_tasks:
+            self._log.error("%s had %s FAILED tasks.", self, len(failed_tasks))
+            self.state = "FAILED"
+        else:
+            self._log.debug("%s tasks completed successfully", len(self._upload_tasks))
+            self.state = "COMPLETED"
+
+    @asyncio.coroutine
+    def _cancel_job(self):
+        # Request each child task to stop, then mark the job cancelled.
+        for task in self._upload_tasks:
+            task.stop()
+
+        # TODO: Wait for all tasks to actually reach terminal
+        # states.
+
+        self.state = "CANCELLED"
+
+    @asyncio.coroutine
+    def _do_job(self):
+        """ Run the job: start tasks, wait for them, record final state. """
+        self.state = "IN_PROGRESS"
+        self._start_upload_tasks()
+        try:
+            yield from self._wait_for_upload_tasks()
+        except asyncio.CancelledError:
+            self._log.debug("%s was cancelled.  Cancelling all tasks.",
+                            self)
+            # Cancellation of child tasks is scheduled, not awaited, so the
+            # CancelledError can propagate to the job future immediately.
+            self._loop.create_task(self._cancel_job())
+            raise
+        finally:
+            self._stop_time = time.time()
+            self._job_future = None
+
+        self._set_final_job_state()
+
+    @asyncio.coroutine
+    def wait(self):
+        """ Wait for the job to reach a terminal state """
+        if self._job_future is None:
+            raise UploadJobError("Job not started")
+
+        yield from asyncio.wait_for(
+                self._job_future,
+                self._timeout_job,
+                loop=self._loop
+                )
+
+    def start(self):
+        """ Start the job and all child tasks """
+        if self._state != "QUEUED":
+            raise UploadJobError("Job already started")
+
+        self._job_future = self._loop.create_task(self._do_job())
+        return self._job_future
+
+    def stop(self):
+        """ Stop the job and all child tasks  """
+        if self._job_future is not None:
+            self.state = "CANCELLING"
+            self._job_future.cancel()
+
+
+class ByteRateCalculator(object):
+    """  This class produces a byte rate from inputted measurements"""
+    def __init__(self, rate_time_constant):
+        """
+        Arguments:
+            rate_time_constant - smoothing constant for the moving average;
+                larger values weight history more heavily
+        """
+        self._rate = 0
+        self._time_constant = rate_time_constant
+
+    @property
+    def rate(self):
+        """ The current smoothed byte rate (bytes/second). """
+        return self._rate
+
+    def add_measurement(self, num_bytes, time_delta):
+        """ Fold a new (bytes, seconds) sample into the smoothed rate.
+
+        Returns the updated rate.  The first sample seeds the rate directly;
+        subsequent samples are blended via an exponential moving average.
+        """
+        rate = num_bytes / time_delta
+        if self._rate == 0:
+            self._rate = rate
+        else:
+            self._rate += ((rate - self._rate) / self._time_constant)
+
+        return self._rate
+
+
+class UploadProgressWriteProxy(object):
+    """ This class implements a write proxy with produces various progress stats
+
+    In order to keep the complexity of the UploadTask down, this class acts as a
+    proxy for a file write.  By providing the original handle to be written to
+    and having the client class call write() on this object, we can produce the
+    various statistics to be consumed.
+    """
+    # Smoothing constant passed to ByteRateCalculator.
+    RATE_TIME_CONSTANT = 5
+
+    def __init__(self, log, loop, bytes_total, write_hdl):
+        """
+        Arguments:
+            log - logger
+            loop - asyncio event loop (used for the rate-sampling task)
+            bytes_total - expected total bytes (0 if unknown)
+            write_hdl - underlying writable handle being proxied
+        """
+        self._log = log
+        self._loop = loop
+        self._bytes_total = bytes_total
+        self._write_hdl = write_hdl
+
+        self._bytes_written = 0
+        self._byte_rate = 0
+
+        self._rate_calc = ByteRateCalculator(UploadProgressWriteProxy.RATE_TIME_CONSTANT)
+        self._rate_task = None
+
+    def write(self, data):
+        """ Forward the write and account for the bytes written. """
+        self._write_hdl.write(data)
+        self._bytes_written += len(data)
+
+    def close(self):
+        """ Close the underlying handle and stop rate monitoring if running. """
+        self._write_hdl.close()
+        if self._rate_task is not None:
+            self._log.debug("stopping rate monitoring task")
+            self._rate_task.cancel()
+
+    def start_rate_monitoring(self):
+        """ Start the rate monitoring task """
+        @asyncio.coroutine
+        def periodic_rate_task():
+            # Sample bytes written over ~1 second windows, forever, until
+            # cancelled by close().
+            while True:
+                start_time = time.time()
+                start_bytes = self._bytes_written
+                yield from asyncio.sleep(1, loop=self._loop)
+                time_period = time.time() - start_time
+                num_bytes = self._bytes_written - start_bytes
+
+                self._byte_rate = self._rate_calc.add_measurement(num_bytes, time_period)
+
+        self._log.debug("starting rate monitoring task")
+        self._rate_task = self._loop.create_task(periodic_rate_task())
+
+    @property
+    def progress_percent(self):
+        """ Integer percent complete; 0 when the total size is unknown. """
+        if self._bytes_total == 0:
+            return 0
+
+        return int(self._bytes_written / self._bytes_total * 100)
+
+    @property
+    def bytes_written(self):
+        """ Bytes written through the proxy so far. """
+        return self._bytes_written
+
+    @property
+    def bytes_total(self):
+        """ Expected total bytes (as supplied at construction). """
+        return self._bytes_total
+
+    @property
+    def bytes_rate(self):
+        """ Smoothed write rate in bytes/second. """
+        return self._byte_rate
+
+
+class GlanceImagePipeGen(object):
+    """ This class produces a read file handle from a generator that produces bytes
+
+    The CAL API takes a file handle as an input.  The Glance API creates a generator
+    that produces byte strings.  This class acts as the mediator by creating a pipe
+    and pumping the bytestring from the generator into the write side of the pipe.
+
+    A pipe has the useful feature here that it will block at the buffer size until
+    the reader has consumed.  This allows us to only pull from glance and push at the
+    pace of the reader preventing us from having to store the images locally on disk.
+    """
+    def __init__(self, log, loop, data_gen):
+        """
+        Arguments:
+            log - logger
+            loop - asyncio event loop
+            data_gen - generator yielding byte strings (e.g. a Glance image body)
+        """
+        self._log = log
+        self._loop = loop
+        self._data_gen = data_gen
+
+        read_fd, write_fd = os.pipe()
+
+        self._read_hdl = os.fdopen(read_fd, 'rb')
+        self._write_hdl = os.fdopen(write_fd, 'wb')
+        # NOTE(review): _close_hdl keeps the original pipe write handle; not
+        # referenced elsewhere in this file — confirm intended use.
+        self._close_hdl = self._write_hdl
+
+    @property
+    def write_hdl(self):
+        """ The current write-side handle fed by the generator thread. """
+        return self._write_hdl
+
+    @write_hdl.setter
+    def write_hdl(self, new_write_hdl):
+        # Allows a caller to substitute/wrap the write handle (presumably to
+        # interpose a progress-tracking proxy) before start() is called.
+        self._write_hdl = new_write_hdl
+
+    @property
+    def read_hdl(self):
+        """ The read side of the pipe, to hand to the consumer. """
+        return self._read_hdl
+
+    def _gen_writer(self):
+        """ Thread body: pump generator output into the pipe until EOF/close. """
+        self._log.debug("starting image data write to pipe")
+        try:
+            for data in self._data_gen:
+                try:
+                    self._write_hdl.write(data)
+                except (BrokenPipeError, ValueError) as e:
+                    # Reader closed its end (or our handle was closed); stop
+                    # pumping quietly — this is the normal cancel path.
+                    self._log.warning("write pipe closed: %s", str(e))
+                    return
+
+        except Exception as e:
+            self._log.exception("error when writing data to pipe: %s", str(e))
+
+        finally:
+            self._log.debug("closing write side of pipe")
+            try:
+                self._write_hdl.close()
+            except OSError:
+                pass
+
+    def start(self):
+        """ Start pumping generator data into the pipe on a daemon thread. """
+        t = threading.Thread(target=self._gen_writer)
+        t.daemon = True
+        t.start()
+
+    def stop(self):
+        """ Close the write side, causing the writer thread to exit. """
+        self._log.debug("stop requested, closing write side of pipe")
+        self._write_hdl.close()
+
+
+class AccountImageUploadTask(object):
+    """ This class manages an create_image task from an image info and file handle
+
+    Manage the upload of a image to a configured cloud account.
+    """
+    # Valid states, in strictly increasing order (the state setter asserts
+    # monotonic progression through this tuple).
+    STATES = ("QUEUED", "CHECK_IMAGE_EXISTS", "UPLOADING", "CANCELLING", "CANCELLED", "COMPLETED", "FAILED")
+
+    TIMEOUT_CHECK_EXISTS = 10
+    TIMEOUT_IMAGE_UPLOAD = 6 * 60 * 60  # 6 hours
+
+    def __init__(self, log, loop, account, image_info, image_hdl,
+                 timeout_exists=TIMEOUT_CHECK_EXISTS, timeout_upload=TIMEOUT_IMAGE_UPLOAD,
+                 progress_info=None, write_canceller=None
+                 ):
+        """
+        Arguments:
+            log - logger
+            loop - asyncio event loop
+            account - cloud account to upload to (provides get_image_list/create_image)
+            image_info - image descriptor; must have the "name" field set
+            image_hdl - readable handle supplying the image bytes
+            timeout_exists - seconds allowed for the existence check
+            timeout_upload - seconds allowed for the upload itself
+            progress_info - optional progress proxy (bytes_written/bytes_total/...)
+            write_canceller - optional object with stop() used to abort the writer
+
+        Raises:
+            ValueError - image_info has no "name" field
+        """
+        self._log = log
+        self._loop = loop
+        self._account = account
+        self._image_info = image_info.deep_copy()
+        self._image_hdl = image_hdl
+
+        self._timeout_exists = timeout_exists
+        self._timeout_upload = timeout_upload
+
+        self._progress_info = progress_info
+        self._write_canceller = write_canceller
+
+        self._state = "QUEUED"
+        self._state_stack = [self._state]
+
+        self._detail = "Task is waiting to be started"
+        self._start_time = time.time()
+        self._stop_time = 0
+        self._upload_future = None
+
+        if not image_info.has_field("name"):
+            raise ValueError("image info must have name field")
+
+    @property
+    def state(self):
+        """ The current task state (one of STATES). """
+        return self._state
+
+    @state.setter
+    def state(self, new_state):
+        states = AccountImageUploadTask.STATES
+        assert new_state in states
+        # States may only move forward through the STATES tuple.
+        assert states.index(new_state) >= states.index(self._state)
+        self._state_stack.append(new_state)
+
+        self._state = new_state
+
+    @property
+    def state_stack(self):
+        """ The list of states this task has progressed through. """
+        return self._state_stack
+
+    @property
+    def image_id(self):
+        """ The image name being uploaded """
+        return self._image_info.id
+
+    @property
+    def image_name(self):
+        """ The image name being uploaded """
+        return self._image_info.name
+
+    @property
+    def image_checksum(self):
+        """ The image checksum being uploaded """
+        if self._image_info.has_field("checksum"):
+            return self._image_info.checksum
+
+        return None
+
+    @property
+    def cloud_account(self):
+        """ The cloud account name which the image is being uploaded to """
+        return self._account.name
+
+    @property
+    def pb_msg(self):
+        """ The UploadTask protobuf message """
+        task = RwImageMgmtYang.UploadTask.from_dict({
+            "cloud_account": self.cloud_account,
+            "image_id": self.image_id,
+            "image_name": self.image_name,
+            "status": self.state,
+            "detail": self._detail,
+            "start_time": self._start_time,
+        })
+
+        # Optional fields are only set when known.
+        if self.image_checksum is not None:
+            task.image_checksum = self.image_checksum
+
+        if self._stop_time:
+            task.stop_time = self._stop_time
+
+        if self._progress_info:
+            task.bytes_written = self._progress_info.bytes_written
+            task.bytes_total = self._progress_info.bytes_total
+            task.progress_percent = self._progress_info.progress_percent
+            task.bytes_per_second = self._progress_info.bytes_rate
+
+        # A completed task always reports 100% regardless of byte counters
+        # (e.g. when the image already existed and nothing was written).
+        if self.state == "COMPLETED":
+            task.progress_percent = 100
+
+        return task
+
+    def _get_account_images(self):
+        """ Fetch the account's image list, mapping CAL errors to ImageListError. """
+        account_images = []
+        self._log.debug("getting image list for account {}".format(self._account.name))
+        try:
+            account_images = self._account.get_image_list()
+        except rift.mano.cloud.CloudAccountCalError as e:
+            msg = "could not get image list for account {}".format(self._account.name)
+            self._log.error(msg)
+            raise ImageListError(msg) from e
+
+        return account_images
+
+    def _has_existing_image(self):
+        """ Return True if the account already has this image (by name, and by
+        checksum too when a checksum is known). """
+        account = self._account
+
+        account_images = self._get_account_images()
+
+        matching_images = [i for i in account_images if i.name == self.image_name]
+
+        if self.image_checksum is not None:
+            matching_images = [i for i in matching_images if i.checksum == self.image_checksum]
+
+        if matching_images:
+            self._log.debug("found matching image with checksum in account %s",
+                            account.name)
+            return True
+
+        self._log.debug("did not find matching image with checksum in account %s",
+                        account.name)
+        return False
+
+    def _upload_image(self):
+        """ Blocking upload via the account CAL; returns the new image id.
+
+        Raises:
+            ImageUploadError - the CAL create_image call failed
+        """
+        image = self._image_info
+        account = self._account
+
+        # The CAL consumes the image via a file descriptor.
+        image.fileno = self._image_hdl.fileno()
+
+        self._log.debug("uploading to account {}: {}".format(account.name, image))
+        try:
+            image.id = account.create_image(image)
+        except rift.mano.cloud.CloudAccountCalError as e:
+            msg = "error when uploading image {} to cloud account: {}".format(image.name, str(e))
+            self._log.error(msg)
+            raise ImageUploadError(msg) from e
+
+        self._log.debug('uploaded image (id: {}) to account{}: {}'.format(
+                        image.id, account.name, image.name))
+
+        return image.id
+
+    @asyncio.coroutine
+    def _do_upload(self):
+        """ Drive the task: existence check, then upload, then final state. """
+        try:
+            self.state = "CHECK_IMAGE_EXISTS"
+            # The blocking CAL calls run in the default executor thread pool.
+            has_image = yield from asyncio.wait_for(
+                    self._loop.run_in_executor(None, self._has_existing_image),
+                    timeout=self._timeout_exists,
+                    loop=self._loop
+                    )
+            if has_image:
+                self.state = "COMPLETED"
+                self._detail = "Image already exists on destination"
+                return
+
+            self.state = "UPLOADING"
+            self._detail = "Uploading image"
+
+            # Note that if the upload times out, the upload thread may still
+            # stick around.  We'll need another method of cancelling the task
+            # through the VALA interface.
+            image_id = yield from asyncio.wait_for(
+                    self._loop.run_in_executor(None, self._upload_image),
+                    timeout=self._timeout_upload,
+                    loop=self._loop
+                    )
+
+        except asyncio.CancelledError as e:
+            self.state = "CANCELLED"
+            self._detail = "Image upload cancelled"
+
+        except ImageUploadTaskError as e:
+            self.state = "FAILED"
+            self._detail = str(e)
+
+        except asyncio.TimeoutError as e:
+            self.state = "FAILED"
+            self._detail = "Timed out during upload task: %s" % str(e)
+
+        else:
+            # If the user does not provide a checksum and performs a URL source
+            # upload with an incorrect URL, then Glance does not indicate a failure
+            # and the CAL cannot detect an incorrect upload.  In this case, use
+            # the bytes_written to detect a bad upload and mark the task as failed.
+            if self._progress_info and self._progress_info.bytes_written == 0:
+                self.state = "FAILED"
+                self._detail = "No bytes written.  Possible bad image source."
+                return
+
+            self.state = "COMPLETED"
+            self._detail = "Image successfully uploaded.  Image id: %s" % image_id
+
+        finally:
+            self._stop_time = time.time()
+            self._upload_future = None
+
+    @asyncio.coroutine
+    def wait(self):
+        """ Wait for the upload task to complete """
+        if self._upload_future is None:
+            raise ImageUploadError("Task not started")
+
+        yield from asyncio.wait_for(
+                self._upload_future,
+                self._timeout_upload, loop=self._loop
+                )
+
+    def start(self):
+        """ Start the upload task """
+        if self._state != "QUEUED":
+            raise ImageUploadError("Task already started")
+
+        self._log.info("Starting %s", self)
+
+        self._upload_future = self._loop.create_task(self._do_upload())
+
+        return self._upload_future
+
+    def stop(self):
+        """ Stop the upload task in progress """
+        if self._upload_future is None:
+            self._log.warning("Cannot cancel %s.  Not in progress.", self)
+            return
+
+        self.state = "CANCELLING"
+        self._detail = "Cancellation has been requested"
+
+        self._log.info("Cancelling %s", self)
+        self._upload_future.cancel()
+        # Also abort the pipe writer, if one was supplied, so the blocking
+        # upload thread sees the pipe close.
+        if self._write_canceller is not None:
+            self._write_canceller.stop()
diff --git a/rwlaunchpad/plugins/rwimagemgr/rwimagemgrtasklet.py b/rwlaunchpad/plugins/rwimagemgr/rwimagemgrtasklet.py
new file mode 100755
index 0000000..9fa34d2
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/rwimagemgrtasklet.py
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwimagemgr
+
+class Tasklet(rift.tasklets.rwimagemgr.ImageManagerTasklet):
+    """ Thin alias so the site-packages implementation is loadable as the
+    plugin entry point (workaround for RIFT-6485; see header comment). """
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwimagemgr/test/CMakeLists.txt b/rwlaunchpad/plugins/rwimagemgr/test/CMakeLists.txt
new file mode 100644
index 0000000..4704724
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/test/CMakeLists.txt
@@ -0,0 +1,29 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 06/28/2016
+# 
+
+# Register the image manager python3 unit tests with CTest; each test script
+# receives its own source path as the test argument.
+rift_py3test(utest_image_upload.py
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_image_upload.py
+  )
+
+rift_py3test(utest_dts_handlers.py
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_dts_handlers.py
+  )
+
diff --git a/rwlaunchpad/plugins/rwimagemgr/test/utest_dts_handlers.py b/rwlaunchpad/plugins/rwimagemgr/test/utest_dts_handlers.py
new file mode 100755
index 0000000..7ba4f76
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/test/utest_dts_handlers.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+
+import asyncio
+
+#asynctest looks for selectors under it's own package but in
+#python3.3 it exists under the asyncio package
+import sys
+sys.path.append(asyncio.__path__[0])
+import asynctest
+
+import logging
+import os
+import unittest
+import unittest.mock
+import xmlrunner
+
+import gi
+gi.require_version("RwDts", "1.0")
+gi.require_version("RwImageMgmtYang", "1.0")
+from gi.repository import (
+    RwDts,
+    RwImageMgmtYang,
+)
+
+import rift.tasklets
+import rift.test.dts
+
+from rift.tasklets.rwimagemgr import tasklet
+from rift.tasklets.rwimagemgr import upload
+
+from rift.test.dts import async_test
+
+import utest_image_upload
+
+
+def create_job_controller_mock():
+    """ Build a Mock speccing upload.ImageUploadJobController. """
+    jc_mock = unittest.mock.Mock(upload.ImageUploadJobController)
+
+    return jc_mock
+
+
+def create_upload_task_creator_mock():
+    """ Build a CoroutineMock exposing create_tasks_from_onboarded_create_rpc. """
+    creator_mock = asynctest.CoroutineMock(spec=["create_tasks_from_onboarded_create_rpc"])
+
+    return creator_mock
+
+
+class RwImageRPCTestCase(rift.test.dts.AbstractDTSTest):
+    @classmethod
+    def configure_schema(cls):
+        return RwImageMgmtYang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", self.id())
+        self.tinfo = self.new_tinfo(self.id())
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+
+        self.task_creator_mock = create_upload_task_creator_mock()
+        self.job_controller_mock = create_job_controller_mock()
+        self.rpc_handler = tasklet.ImageDTSRPCHandler(
+                self.log, self.loop, self.dts, {'mock', None}, object(), self.task_creator_mock,
+                self.job_controller_mock
+                )
+        self.show_handler = tasklet.ImageDTSShowHandler(
+                self.log, self.loop, self.dts, self.job_controller_mock
+                )
+
+        self.tinfo_c = self.new_tinfo(self.id() + "_client")
+        self.dts_c = rift.tasklets.DTS(self.tinfo_c, self.schema, self.loop)
+
+        self._upload_mixin = utest_image_upload.UploadTaskMixin(self.log, self.loop)
+        self._image_mock_mixin = utest_image_upload.ImageMockMixin(self)
+
+    @async_test
+    def test_create_job(self):
+        yield from self.rpc_handler.register()
+        yield from self.show_handler.register()
+
+        account = self._image_mock_mixin.account
+        with self._upload_mixin.create_upload_task(account) as upload_task:
+            self.task_creator_mock.create_tasks_from_onboarded_create_rpc.return_value = [upload_task]
+            self.job_controller_mock.create_job.return_value = 2
+            type(self.job_controller_mock).pb_msg = unittest.mock.PropertyMock(
+                    return_value=RwImageMgmtYang.UploadJobs.from_dict({
+                        "job": [
+                            {
+                                "id": 2,
+                                "upload_tasks": [upload_task.pb_msg],
+                                "status": "COMPLETED"
+                            }
+                        ]
+                    })
+                  )
+
+            create_job_msg = RwImageMgmtYang.CreateUploadJob.from_dict({
+                "cloud_account": [upload_task.cloud_account],
+                "onboarded_image": {
+                    "image_name": upload_task.image_name,
+                    "image_checksum": upload_task.image_checksum,
+                }
+            })
+
+            query_iter = yield from self.dts_c.query_rpc(
+                    "I,/rw-image-mgmt:create-upload-job",
+                    0,
+                    create_job_msg,
+                    )
+
+            for fut_resp in query_iter:
+                rpc_result = (yield from fut_resp).result
+
+            self.assertEqual(2, rpc_result.job_id)
+
+            self.assertTrue(
+                    self.task_creator_mock.create_tasks_from_onboarded_create_rpc.called
+                    )
+
+            query_iter = yield from self.dts_c.query_read(
+                    "D,/rw-image-mgmt:upload-jobs",
+                    )
+
+            for fut_resp in query_iter:
+                rpc_result = (yield from fut_resp).result
+                self.assertEqual(1, len(rpc_result.job))
+                self.assertEqual(2, rpc_result.job[0].id)
+                self.assertEqual(1, len(rpc_result.job[0].upload_tasks))
+
+
+def main(argv=sys.argv[1:]):
+    """ Parse test options and run the unittest suite under an XML runner. """
+    logging.basicConfig(format='TEST %(message)s')
+
+    # XML results are written into the directory named by RIFT_MODULE_TEST.
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
diff --git a/rwlaunchpad/plugins/rwimagemgr/test/utest_image_upload.py b/rwlaunchpad/plugins/rwimagemgr/test/utest_image_upload.py
new file mode 100755
index 0000000..9d4464f
--- /dev/null
+++ b/rwlaunchpad/plugins/rwimagemgr/test/utest_image_upload.py
@@ -0,0 +1,511 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import asyncio
+import contextlib
+import io
+import logging
+import os
+import sys
+import tempfile
+import time
+import unittest
+import uuid
+import xmlrunner
+
+from rift.mano import cloud
+from rift.tasklets.rwimagemgr import upload
+from rift.package import checksums
+from rift.test.dts import async_test
+import rw_status
+
+import gi
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwLog', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+        RwCal,
+        RwCloudYang,
+        RwLog,
+        RwTypes,
+        RwcalYang,
+        )
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class CreateImageMock(object):
+    def __init__(self, log):
+        self._log = log
+        self.image_name = None
+        self.image_checksum = None
+
+        self.do_fail = False
+        self.do_read_slow = False
+
+        self._image_msgs = []
+
+    def add_existing_image(self, image_msg):
+        self._log.debug("Appending existing image msg: %s", image_msg)
+        self._image_msgs.append(image_msg)
+
+    @rwstatus
+    def do_create_image(self, _, image):
+        if self.do_fail:
+            self._log.debug("Simulating failure")
+            raise ValueError("FAILED")
+
+        if not image.has_field("fileno"):
+            raise ValueError("Image must have fileno")
+
+        self.image_name = image.name
+
+        # Create a duplicate file descriptor to allow this code to own
+        # its own descriptor (and thus close it) and allow the client to
+        # own and close its own descriptor.
+        new_fileno = os.dup(image.fileno)
+        with os.fdopen(new_fileno, 'rb') as hdl:
+            bytes_hdl = io.BytesIO()
+            if self.do_read_slow:
+                self._log.debug("slow reading from mock cal")
+                try:
+                    num_bytes = 0
+                    while True:
+                        d = os.read(new_fileno, 1024)
+                        num_bytes += len(d)
+                        bytes_hdl.write(d)
+                        if not d:
+                            self._log.debug("read %s bytes", num_bytes)
+                            return
+
+                        time.sleep(.05)
+
+                except Exception as e:
+                    self._log.warning("caught exception when reading: %s",
+                                      str(e))
+                    raise
+
+            else:
+                bytes_hdl.write(hdl.read())
+
+            bytes_hdl.seek(0)
+            self.image_checksum = checksums.checksum(bytes_hdl)
+            bytes_hdl.close()
+
+        return str(uuid.uuid4())
+
+    @rwstatus
+    def do_get_image_list(self, account):
+        boxed_image_list = RwcalYang.VimResources()
+        for msg in self._image_msgs:
+            boxed_image_list.imageinfo_list.append(msg)
+
+        return boxed_image_list
+
+
+def create_random_image_file():
+    with open("/dev/urandom", "rb") as rand_hdl:
+        file_hdl = tempfile.NamedTemporaryFile("r+b")
+        file_hdl.write(rand_hdl.read(1 * 1024 * 1024))
+        file_hdl.flush()
+        file_hdl.seek(0)
+        return file_hdl
+
+
+def get_file_hdl_gen(file_hdl):
+    while True:
+        try:
+            d = file_hdl.read(64)
+        except ValueError:
+            return
+
+        if not d:
+            return
+
+        yield d
+
+
+def get_image_checksum(image_hdl):
+    image_checksum = checksums.checksum(image_hdl)
+    image_hdl.seek(0)
+    return image_checksum
+
+
+def create_image_info(image_name, image_checksum):
+    image = RwcalYang.ImageInfoItem()
+    image.name = image_name
+    image.checksum = image_checksum
+    image.disk_format = os.path.splitext(image_name)[1][1:]
+    image.container_format = "bare"
+
+    return image
+
+
+class UploadTaskMixin(object):
+    def __init__(self, log, loop):
+        self._log = log
+        self._loop = loop
+
+    def create_image_hdl(self):
+        image_hdl = create_random_image_file()
+
+        return image_hdl
+
+    @contextlib.contextmanager
+    def create_upload_task(self, account, image_name="test.qcow2",
+                           image_checksum=None, image_info=None):
+
+        with self.create_image_hdl() as image_hdl:
+
+            image_checksum = get_image_checksum(image_hdl) \
+                if image_checksum is None else image_checksum
+
+            image_info = create_image_info(image_name, image_checksum) \
+                if image_info is None else image_info
+
+            iter_hdl = get_file_hdl_gen(image_hdl)
+            pipe_gen = upload.GlanceImagePipeGen(self._log, self._loop, iter_hdl)
+
+            upload_task = upload.AccountImageUploadTask(
+                    self._log, self._loop, account, image_info, pipe_gen.read_hdl,
+                    write_canceller=pipe_gen
+                    )
+            pipe_gen.start()
+
+            yield upload_task
+
+
+class ImageMockMixin(object):
+    ACCOUNT_MSG = RwCloudYang.CloudAccount(
+        name="mock",
+        account_type="mock",
+        )
+
+    def __init__(self, log):
+        self._log = log
+        self._account = cloud.CloudAccount(
+                self._log,
+                RwLog.Ctx.new(__file__), ImageMockMixin.ACCOUNT_MSG
+                )
+
+        self._create_image_mock = CreateImageMock(self._log)
+
+        # Mock the create_image call
+        self._account.cal.create_image = self._create_image_mock.do_create_image
+        self._account.cal.get_image_list = self._create_image_mock.do_get_image_list
+
+    @property
+    def account(self):
+        return self._account
+
+    @property
+    def image_mock(self):
+        return self._create_image_mock
+
+
+class TestImageUploadTask(unittest.TestCase, UploadTaskMixin, ImageMockMixin):
+    def __init__(self, *args, **kwargs):
+        self._loop = asyncio.get_event_loop()
+        self._log = logging.getLogger(__file__)
+
+        ImageMockMixin.__init__(self, self._log)
+        UploadTaskMixin.__init__(self, self._log, self._loop)
+        unittest.TestCase.__init__(self, *args, **kwargs)
+
+    @async_test
+    def test_upload_image_task(self):
+        with self.create_upload_task(self.account) as upload_task:
+            yield from upload_task.start()
+
+        self.assertIn("QUEUED", upload_task.state_stack)
+        self.assertIn("CHECK_IMAGE_EXISTS", upload_task.state_stack)
+        self.assertIn("UPLOADING", upload_task.state_stack)
+        self.assertIn("COMPLETED", upload_task.state_stack)
+
+        self.assertEqual("COMPLETED", upload_task.state)
+
+        self.assertEqual(self.image_mock.image_name, upload_task.image_name)
+        self.assertEqual(self.image_mock.image_checksum, upload_task.image_checksum)
+
+        task_pb_msg = upload_task.pb_msg
+        self.assertEqual(upload_task.image_name, task_pb_msg.image_name)
+
+    @async_test
+    def test_cancel_image_task(self):
+        @asyncio.coroutine
+        def wait_for_task_state(upload_task, state, timeout=10):
+            start_time = time.time()
+            while (time.time() - start_time) < timeout:
+                if upload_task.state == state:
+                    return
+
+                yield from asyncio.sleep(.01)
+
+            raise asyncio.TimeoutError()
+
+        self.image_mock.do_read_slow = True
+
+        with self.create_upload_task(self.account) as upload_task:
+            upload_task.start()
+            yield from wait_for_task_state(upload_task, "UPLOADING")
+            upload_task.stop()
+            self.assertEqual("CANCELLING", upload_task.state)
+            yield from wait_for_task_state(upload_task, "CANCELLED")
+
+    @async_test
+    def test_create_image_failed(self):
+        self.image_mock.do_fail = True
+
+        with self.create_upload_task(self.account) as upload_task:
+            yield from upload_task.start()
+
+        self.assertEqual("FAILED", upload_task.state)
+
+    @async_test
+    def test_create_image_name_and_checksum_exists(self):
+        with self.create_upload_task(self.account) as upload_task:
+            image_entry = RwcalYang.ImageInfoItem(
+                    id="asdf",
+                    name=upload_task.image_name,
+                    checksum=upload_task.image_checksum
+                    )
+            self.image_mock.add_existing_image(image_entry)
+
+            yield from upload_task.start()
+
+        # No image should have been uploaded, since the name and checksum matched
+        self.assertEqual(self.image_mock.image_checksum, None)
+
+        self.assertEqual("COMPLETED", upload_task.state)
+        self.assertTrue("UPLOADING" not in upload_task.state_stack)
+
+
+class TestUploadJob(unittest.TestCase, UploadTaskMixin, ImageMockMixin):
+    def __init__(self, *args, **kwargs):
+        self._loop = asyncio.get_event_loop()
+        self._log = logging.getLogger(__file__)
+
+        ImageMockMixin.__init__(self, self._log)
+        UploadTaskMixin.__init__(self, self._log, self._loop)
+        unittest.TestCase.__init__(self, *args, **kwargs)
+
+    @async_test
+    def test_single_task_upload_job(self):
+        with self.create_upload_task(self.account) as upload_task:
+            job = upload.ImageUploadJob(self._log, self._loop, [upload_task])
+            self.assertEqual("QUEUED", job.state)
+            yield from job.start()
+
+        self.assertIn("QUEUED", job.state_stack)
+        self.assertIn("IN_PROGRESS", job.state_stack)
+        self.assertIn("COMPLETED", job.state_stack)
+
+        self.assertEqual("COMPLETED", job.state)
+
+        job_pb_msg = job.pb_msg
+        self.assertEqual("COMPLETED", job_pb_msg.status)
+
+    @async_test
+    def test_multiple_tasks_upload_job(self):
+        with self.create_upload_task(self.account) as upload_task1:
+            with self.create_upload_task(self.account) as upload_task2:
+                job = upload.ImageUploadJob(
+                        self._log, self._loop, [upload_task1, upload_task2])
+                yield from job.start()
+
+        self.assertEqual("COMPLETED", job.state)
+
+    @async_test
+    def test_failed_task_in_job(self):
+        self.image_mock.do_fail = True
+
+        with self.create_upload_task(self.account) as upload_task:
+            job = upload.ImageUploadJob(
+                    self._log, self._loop, [upload_task])
+            yield from job.start()
+
+        self.assertEqual("FAILED", job.state)
+
+    @async_test
+    def test_cancel_job(self):
+        @asyncio.coroutine
+        def wait_for_job_state(upload_job, state, timeout=10):
+            start_time = time.time()
+            while (time.time() - start_time) < timeout:
+                if upload_job.state == state:
+                    return
+
+                yield from asyncio.sleep(.01)
+
+            raise asyncio.TimeoutError()
+
+        self.image_mock.do_read_slow = True
+
+        with self.create_upload_task(self.account) as upload_task:
+            job = upload.ImageUploadJob(
+                    self._log, self._loop, [upload_task])
+            job.start()
+            yield from wait_for_job_state(job, "IN_PROGRESS")
+            job.stop()
+            self.assertEqual("CANCELLING", job.state)
+            yield from wait_for_job_state(job, "CANCELLED")
+
+        self.assertEqual("CANCELLED", job.state)
+
+
+class TestUploadJobController(unittest.TestCase, UploadTaskMixin, ImageMockMixin):
+    def __init__(self, *args, **kwargs):
+        self._loop = asyncio.get_event_loop()
+        self._log = logging.getLogger(__file__)
+
+        ImageMockMixin.__init__(self, self._log)
+        unittest.TestCase.__init__(self, *args, **kwargs)
+
+    @async_test
+    def test_controller_single_task_job(self):
+        controller = upload.ImageUploadJobController(
+                self._log, self._loop
+                )
+
+        with self.create_upload_task(self.account) as upload_task:
+            job_id = controller.create_job([upload_task])
+            self.assertEqual(len(controller.active_jobs), 1)
+            self.assertEqual(len(controller.completed_jobs), 0)
+
+            job = controller.get_job(job_id)
+            yield from job.wait()
+
+            self.assertEqual(len(controller.active_jobs), 0)
+            self.assertEqual(len(controller.completed_jobs), 1)
+
+            upload_jobs_pb_msg = controller.pb_msg
+            self.assertEqual(len(upload_jobs_pb_msg.job), 1)
+
+    @async_test
+    def test_controller_multi_task_job(self):
+        controller = upload.ImageUploadJobController(
+                self._log, self._loop
+                )
+
+        with self.create_upload_task(self.account) as upload_task1:
+            with self.create_upload_task(self.account) as upload_task2:
+                job_id = controller.create_job([upload_task1, upload_task2])
+                self.assertEqual(len(controller.active_jobs), 1)
+                self.assertEqual(len(controller.completed_jobs), 0)
+
+                job = controller.get_job(job_id)
+                yield from job.wait()
+                self.assertEqual(len(controller.active_jobs), 0)
+                self.assertEqual(len(controller.completed_jobs), 1)
+
+    @async_test
+    def test_controller_multi_jobs(self):
+        controller = upload.ImageUploadJobController(
+                self._log, self._loop
+                )
+
+        with self.create_upload_task(self.account) as upload_task1:
+            with self.create_upload_task(self.account) as upload_task2:
+                job1_id = controller.create_job([upload_task1])
+                job2_id = controller.create_job([upload_task2])
+                self.assertEqual(len(controller.active_jobs), 2)
+                self.assertEqual(len(controller.completed_jobs), 0)
+
+                job1 = controller.get_job(job1_id)
+                job2 = controller.get_job(job2_id)
+
+                yield from asyncio.wait(
+                        [job1.wait(), job2.wait()],
+                        loop=self._loop
+                        )
+
+                self.assertEqual(len(controller.active_jobs), 0)
+                self.assertEqual(len(controller.completed_jobs), 2)
+
+
+class TestRateCalc(unittest.TestCase):
+    def test_no_smoothing(self):
+        calc = upload.ByteRateCalculator(1)
+        self.assertEqual(0, calc.rate)
+        calc.add_measurement(100, 1)
+        self.assertEqual(100, calc.rate)
+        calc.add_measurement(400, 2)
+        self.assertEqual(200, calc.rate)
+
+    def test_smoothing(self):
+        calc = upload.ByteRateCalculator(2)
+        calc.add_measurement(100, 1)
+        self.assertEqual(100, calc.rate)
+
+        calc.add_measurement(400, 2)
+        self.assertEqual(150, calc.rate)
+
+        calc.add_measurement(400, 2)
+        self.assertEqual(175, calc.rate)
+
+
+class TestUploadProgress(unittest.TestCase):
+    def setUp(self):
+        self._loop = asyncio.get_event_loop()
+        self._log = logging.getLogger(__file__)
+
+    def test_write_proxy(self):
+        mem_hdl = io.BytesIO()
+        proxy = upload.UploadProgressWriteProxy(self._log, self._loop, 1000, mem_hdl)
+
+        data = b'test_bytes'
+
+        proxy.write(data)
+        self.assertEqual(data, mem_hdl.getvalue())
+        self.assertEqual(len(data), proxy.bytes_written)
+        self.assertEqual(1000, proxy.bytes_total)
+        self.assertEqual(1, proxy.progress_percent)
+
+        proxy.close()
+        self.assertTrue(mem_hdl.closed)
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt b/rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt
new file mode 100644
index 0000000..b1f6a7f
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt
@@ -0,0 +1,67 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwlaunchpad)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/convert_pkg.py
+    rift/tasklets/${TASKLET_NAME}/datacenters.py
+    rift/tasklets/${TASKLET_NAME}/export.py
+    rift/tasklets/${TASKLET_NAME}/extract.py
+    rift/tasklets/${TASKLET_NAME}/image.py
+    rift/tasklets/${TASKLET_NAME}/message.py
+    rift/tasklets/${TASKLET_NAME}/onboard.py
+    rift/tasklets/${TASKLET_NAME}/state.py
+    rift/tasklets/${TASKLET_NAME}/tasklet.py
+    rift/tasklets/${TASKLET_NAME}/tosca.py
+    rift/tasklets/${TASKLET_NAME}/uploader.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
+
+rift_python_install_tree(
+  FILES
+    rift/package/__init__.py
+    rift/package/archive.py
+    rift/package/charm.py
+    rift/package/checksums.py
+    rift/package/config.py
+    rift/package/convert.py
+    rift/package/icon.py
+    rift/package/image.py
+    rift/package/package.py
+    rift/package/script.py
+    rift/package/store.py
+    rift/package/cloud_init.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
+
+rift_add_subdirs(test scripts)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile b/rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upwards(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/__init__.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/__init__.py
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/archive.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/archive.py
new file mode 100644
index 0000000..fffce99
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/archive.py
@@ -0,0 +1,152 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import io
+import os
+import tarfile
+import time
+
+from . import package
+
+class ArchiveError(Exception):
+    pass
+
+
+def get_size(hdl):
+    """ Get number of bytes of content within file hdl
+    Set the file position to original position before returning
+
+    Returns:
+        Number of bytes in the hdl file object
+    """
+    old_pos = hdl.tell()
+    hdl.seek(0, os.SEEK_END)
+    size = hdl.tell()
+    hdl.seek(old_pos)
+
+    return size
+
+
+class TarPackageArchive(object):
+    """  This class represents a package stored within a tar.gz archive file """
+    def __init__(self, log, tar_file_hdl, mode="r"):
+        self._log = log
+        self._tar_filehdl = tar_file_hdl
+        self._tar_infos = {}
+
+        self._tarfile = tarfile.open(fileobj=tar_file_hdl, mode=mode)
+
+        self.load_archive()
+
+    @classmethod
+    def from_package(cls, log, pkg, tar_file_hdl):
+        """ Creates a TarPackageArchive from an existing Package
+
+        Arguments:
+            log - logger
+            pkg - a DescriptorPackage instance
+            tar_file_hdl - a writeable file handle to write tar archive data
+
+        Returns:
+            A TarPackageArchive instance
+        """
+
+        def set_common_tarinfo_fields(tar_info):
+            tar_info.uid = os.getuid()
+            tar_info.gid = os.getgid()
+            tar_info.mtime = time.time()
+            tar_info.uname = "rift"
+            tar_info.gname = "rift"
+
+        archive = TarPackageArchive(log, tar_file_hdl, mode='w:gz')
+        for pkg_file in pkg.files:
+            tar_info = tarfile.TarInfo(name=pkg_file)
+            tar_info.type = tarfile.REGTYPE
+            tar_info.mode = pkg.get_file_mode(pkg_file)
+            set_common_tarinfo_fields(tar_info)
+            with pkg.open(pkg_file) as pkg_file_hdl:
+                tar_info.size = get_size(pkg_file_hdl)
+                archive.tarfile.addfile(tar_info, pkg_file_hdl)
+
+        for pkg_dir in pkg.dirs:
+            tar_info = tarfile.TarInfo(name=pkg_dir)
+            tar_info.type = tarfile.DIRTYPE
+            tar_info.mode = 0o775
+            set_common_tarinfo_fields(tar_info)
+            archive.tarfile.addfile(tar_info)
+
+        archive.load_archive()
+        archive.close()
+
+        return archive
+
+    def __repr__(self):
+        return "TarPackageArchive(%s)" % self._tar_filehdl
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        """ Close the opened tarfile"""
+        if self._tarfile is not None:
+            self._tarfile.close()
+            self._tarfile = None
+
+    def load_archive(self):
+        self._tar_infos = {info.name: info for info in self._tarfile.getmembers() if info.name}
+
+    @property
+    def tarfile(self):
+        return self._tarfile
+
+    @property
+    def filenames(self):
+        """ The list of file members within the tar file """
+        return [name for name in self._tar_infos if tarfile.TarInfo.isfile(self._tar_infos[name])]
+
+    def open_file(self, rel_file_path):
+        """ Opens a file within the archive as read-only, byte mode.
+
+        Arguments:
+            rel_file_path - The file path within the archive to open
+
+        Returns:
+            A file like object (see tarfile.extractfile())
+
+        Raises:
+            FileNotFoundError - The file could not be found within the archive.
+            ArchiveError - The file could not be opened for some generic reason.
+        """
+        if rel_file_path not in self._tar_infos:
+            raise FileNotFoundError("Could not find %s in tar file", rel_file_path)
+
+        try:
+            return self._tarfile.extractfile(rel_file_path)
+        except tarfile.TarError as e:
+            msg = "Failed to read file {} from tarfile {}: {}".format(
+                  rel_file_path, self._tar_filehdl, str(e)
+                  )
+            self._log.error(msg)
+            raise ArchiveError(msg) from e
+
+    def create_package(self):
+        """  Creates a Descriptor package from the archive contents """
+        pkg = package.DescriptorPackage.from_package_files(self._log, self.open_file, self.filenames)
+        for pkg_file in self.filenames:
+            pkg.add_file(pkg_file, self._tar_infos[pkg_file].mode)
+
+        return pkg
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/charm.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/charm.py
new file mode 100644
index 0000000..d907731
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/charm.py
@@ -0,0 +1,96 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import os.path
+
+from . import package
+
+
+class CharmExtractionError(Exception):
+    pass
+
+
+class PackageCharmExtractor(object):
+    """ This class is responsible for extracting charms to the correct directory
+
+    In order to remain compatible with the existing Jujuclient, we extract the charms
+    to a known location (RIFT-13282)
+    """
+    DEFAULT_INSTALL_DIR = os.path.join(
+            os.environ["RIFT_ARTIFACTS"],
+            "launchpad"
+            )
+
+    CHARM_REGEX = "{prefix}charms/(trusty/)?(?P<charm_name>[^/]+)$"
+
+    def __init__(self, log, install_dir=DEFAULT_INSTALL_DIR):
+        self._log = log
+        self._install_dir = install_dir
+
+    def _get_rel_dest_path(self, descriptor_id, charm_name):
+        dest_rel_path = "libs/{}/charms/trusty/{}".format(descriptor_id, charm_name)
+        dest_path = os.path.join(self._install_dir, dest_rel_path)
+        return dest_path
+
+    @classmethod
+    def charm_dir_map(cls, package):
+        charm_map = {}
+        regex = cls.CHARM_REGEX.format(prefix=package.prefix)
+
+        for dir_name in package.dirs:
+            match = re.match(
+                    cls.CHARM_REGEX.format(prefix=package.prefix), dir_name,
+                    )
+            if match is None:
+                continue
+
+            charm_name = match.group("charm_name")
+            if charm_name == "trusty":
+                continue
+
+            charm_map[charm_name] = dir_name
+
+        return charm_map
+
+    def get_extracted_charm_dir(self, package_id, charm_name):
+        return os.path.join(
+                self._get_rel_dest_path(package_id, charm_name),
+                )
+
+    def extract_charms(self, pkg):
+        """ Extract charms contained within the DescriptorPackage
+        to the known charm directory.
+
+        Arguments:
+            pkg - The descriptor package that MAY contain charm directories
+
+        Raises:
+            CharmExtractionError - Charms in the package failed to get extracted
+        """
+        descriptor_id = pkg.descriptor_id
+        charm_dir_map = PackageCharmExtractor.charm_dir_map(pkg)
+
+        for charm_name, charm_dir in charm_dir_map.items():
+            dest_rel_path = self._get_rel_dest_path(descriptor_id, charm_name)
+            dest_path = os.path.join(self._install_dir, dest_rel_path)
+
+            self._log.debug("Extracting %s charm to %s", charm_name, dest_path)
+            try:
+                pkg.extract_dir(charm_dir, dest_path)
+            except package.ExtractError as e:
+                raise CharmExtractionError("Failed to extract charm %s" % charm_name) from e
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/checksums.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/checksums.py
new file mode 100644
index 0000000..975967e
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/checksums.py
@@ -0,0 +1,79 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import hashlib
+import re
+
+def checksum_string(s):
+    return hashlib.md5(s.encode('utf-8')).hexdigest()
+
+
+def checksum(fd):
+    """ Calculate a md5 checksum of fd file handle
+
+    Arguments:
+      fd: A file descriptor returned from an open() call
+
+    Returns:
+      A md5 checksum of the file
+
+    """
+    pos = fd.tell()
+    try:
+        current = hashlib.md5()
+        while True:
+            data = fd.read(2 ** 16)
+            if len(data) == 0:
+                return current.hexdigest()
+            current.update(data)
+    finally:
+        fd.seek(pos)
+
+
+class ArchiveChecksums(dict):
+    @classmethod
+    def from_file_desc(cls, fd):
+        checksum_pattern = re.compile(r"(\S+)\s+(\S+)")
+        checksums = dict()
+
+        pos = fd.tell()
+        try:
+            for line in (line.decode('utf-8').strip() for line in fd if line):
+
+                # Skip comments
+                if line.startswith('#'):
+                    continue
+
+                # Skip lines that do not contain the pattern we are looking for
+                result = checksum_pattern.search(line)
+                if result is None:
+                    continue
+
+                chksum, filepath = result.groups()
+                checksums[filepath] = chksum
+
+        finally:
+            fd.seek(pos)
+
+        return cls(checksums)
+
+    def to_string(self):
+        string = ""
+        for file_name, file_checksum in self.items():
+            string += "{}  {}\n".format(file_name, file_checksum)
+
+        return string
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/cloud_init.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/cloud_init.py
new file mode 100644
index 0000000..78c258c
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/cloud_init.py
@@ -0,0 +1,76 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import os.path
+
+from . import package
+
+
class CloudInitExtractionError(Exception):
    """ Raised when a cloud_init script is missing from the package or
    cannot be extracted (see PackageCloudInitExtractor.read_script). """
    pass
+
+
class PackageCloudInitExtractor(object):
    """ This class is responsible for extracting cloud_init scripts from
    a descriptor package.
    """

    # Matches "<prefix>[/]cloud_init/<script_name>" with no further subdirs
    SCRIPT_REGEX = "{prefix}/?cloud_init/(?P<script_name>[^/]+)$"

    def __init__(self, log):
        """
        Arguments:
            log - A logger instance
        """
        self._log = log

    @classmethod
    def package_script_files(cls, package):
        """ Map cloud_init script names to their paths within the package

        Arguments:
            package - A DescriptorPackage (needs .prefix and .files)

        Returns:
            A dict of script file name -> file path within the package
        """
        script_file_map = {}

        # Compile once instead of re-formatting/parsing the pattern per file
        regex = re.compile(cls.SCRIPT_REGEX.format(prefix=package.prefix))
        for file_name in package.files:
            match = regex.match(file_name)
            if match is None:
                continue

            script_file_map[match.group("script_name")] = file_name

        return script_file_map

    def read_script(self, pkg, filename):
        """ Return the decoded contents of a named cloud_init script

        Arguments:
            pkg - A DescriptorPackage containing the script
            filename - The script file name (without directories) to read

        Returns:
            The script contents as a str

        Raises:
            CloudInitExtractionError - The script was not found in the
                package or could not be extracted.
        """
        # FIX: removed unused "descriptor_id" local and replaced the linear
        # scan over the dict with a direct lookup.
        script_files = PackageCloudInitExtractor.package_script_files(pkg)

        if filename in script_files:
            script_file = script_files[filename]
            self._log.debug("Found %s script file in package at %s", filename, script_file)

            try:
                with pkg.open(script_file) as f:
                    userdata = f.read()
                    self._log.info("cloud_init read from file %s", userdata)
                    # File contents are read in binary string, decode to regular string and return
                    return userdata.decode()
            except package.ExtractError as e:
                raise CloudInitExtractionError("Failed to extract script %s" % filename) from e

        # If we've reached this point but not found a matching cloud_init script, 
        # raise an Exception, since we got here only because there was supposed 
        # to be a cloud_init_file in the VDU
        errmsg = "No cloud-init config file found in the descriptor package"
        self._log.error(errmsg)
        raise CloudInitExtractionError(errmsg)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/config.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/config.py
new file mode 100644
index 0000000..9a06116
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/config.py
@@ -0,0 +1,93 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import os.path
+
+from . import package
+
+
class ConfigExtractionError(Exception):
    """ Raised when a package config file cannot be extracted
    (see PackageConfigExtractor.extract_configs). """
    pass
+
+
class PackageConfigExtractor(object):
    """ This class is responsible for extracting config data to the correct directory

    In order to remain compatible with the existing ConfigManager, we extract the config
    to a known location (RIFT-13282)
    """
    # NOTE(review): evaluated at import time; raises KeyError if the
    # RIFT_ARTIFACTS environment variable is unset.
    DEFAULT_INSTALL_DIR = os.path.join(
            os.environ["RIFT_ARTIFACTS"],
            "launchpad"
            )

    # BUG FIX: escaped the '.' so only real ".yaml" files match (the previous
    # unescaped '.' also matched names like "fooXyaml").
    CONFIG_REGEX = r"{prefix}(ns_config|vnf_config)/(?P<config_name>[^/]+\.yaml)$"

    def __init__(self, log, install_dir=DEFAULT_INSTALL_DIR):
        """
        Arguments:
            log - A logger instance
            install_dir - Root directory configs are extracted under
        """
        self._log = log
        self._install_dir = install_dir

    def _get_rel_dest_path(self, descriptor_id, config_name):
        # NOTE: despite the name, this returns the FULL destination path
        # (install_dir joined with the per-descriptor relative path).
        dest_rel_path = "libs/{}/config/{}".format(descriptor_id, config_name)
        dest_path = os.path.join(self._install_dir, dest_rel_path)
        return dest_path

    @classmethod
    def package_config_files(cls, package):
        """ Map config file names to their paths within the package

        Arguments:
            package - A DescriptorPackage

        Returns:
            A dict of config file name -> file path within the package
        """
        config_map = {}
        # FIX: the previous code computed the pattern into an unused local and
        # then re-formatted it for every file; compile it once instead.
        regex = re.compile(cls.CONFIG_REGEX.format(prefix=package.prefix))

        for file_name in package.files:
            match = regex.match(file_name)
            if match is None:
                continue

            config_map[match.group("config_name")] = file_name

        return config_map

    def get_extracted_config_path(self, package_id, config_name):
        """ Return the path a config file is (or would be) extracted to """
        return self._get_rel_dest_path(package_id, os.path.basename(config_name))

    def extract_configs(self, pkg):
        """ Extract any configuration files from the DescriptorPackage

        Arguments:
            pkg - A DescriptorPackage

        Raises:
            ConfigExtractionError - The configuration could not be extracted
        """
        descriptor_id = pkg.descriptor_id

        for config_name, config_file in PackageConfigExtractor.package_config_files(pkg).items():
            # BUG FIX: _get_rel_dest_path already includes install_dir; the
            # previous second join duplicated it when install_dir was relative.
            dest_path = self._get_rel_dest_path(descriptor_id, config_name)

            self._log.debug("Extracting %s config to %s", config_name, dest_path)
            try:
                pkg.extract_file(config_file, dest_path)
            except package.ExtractError as e:
                raise ConfigExtractionError("Failed to extract config %s" % config_name) from e
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/convert.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/convert.py
new file mode 100644
index 0000000..7571c57
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/convert.py
@@ -0,0 +1,283 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import json
+import os
+import tempfile
+
+import gi
+gi.require_version('RwNsdYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwYang', '1.0')
+from gi.repository import (
+        RwNsdYang,
+        RwVnfdYang,
+        NsdYang,
+        VnfdYang,
+        RwYang,
+        )
+
+
class UnknownExtensionError(Exception):
    """ Raised when a file extension does not map to any known
    (de)serialization format (see ProtoMessageSerializer). """
    pass
+
+
class SerializationError(Exception):
    """ Raised when a descriptor message fails to serialize or
    deserialize; wraps the underlying exception. """
    pass
+
+
def decode(desc_data):
    """ Return descriptor data as str, decoding from bytes when needed. """
    if isinstance(desc_data, bytes):
        return desc_data.decode()
    return desc_data
+
+
class ProtoMessageSerializer(object):
    """(De)serializer for a specific protobuf message into various formats"""

    # Cached libncx model, shared per serializer class (see model property)
    libncx_model = None

    def __init__(self, yang_ns, yang_pb_cls):
        """ Create a serializer for a specific protobuf message

        Arguments:
            yang_ns - The Protobuf GI namespace class (e.g. RwVnfdYang)
            yang_pb_cls - The Protobuf GI message class in that namespace
        """
        self._yang_ns = yang_ns
        self._yang_pb_cls = yang_pb_cls

    @classmethod
    def _deserialize_extension_method_map(cls):
        # File extension (lowercase) -> unbound deserializer method
        return {
                ".xml": cls._from_xml_file_hdl,
                ".yml": cls._from_yaml_file_hdl,
                ".yaml": cls._from_yaml_file_hdl,
                ".json": cls._from_json_file_hdl,
                }

    @classmethod
    def _serialize_extension_method_map(cls):
        # File extension (lowercase) -> unbound serializer method
        return {
                ".xml": cls.to_xml_string,
                ".yml": cls.to_yaml_string,
                ".yaml": cls.to_yaml_string,
                ".json": cls.to_json_string,
                }

    @classmethod
    def is_supported_file(cls, filename):
        """Returns whether a file has a supported file extension

        Arguments:
            filename - A descriptor file

        Returns:
            True if file extension is supported, False otherwise

        """
        _, extension = os.path.splitext(filename)
        extension_lc = extension.lower()

        return extension_lc in cls._deserialize_extension_method_map()

    @property
    def yang_namespace(self):
        """ The Protobuf's GI namespace class (e.g. RwVnfdYang) """
        return self._yang_ns

    @property
    def yang_class(self):
        """ The Protobuf's GI class (e.g. RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd) """
        return self._yang_pb_cls

    @property
    def model(self):
        cls = self.__class__

        # Cache the libncx model for the serializer class
        if cls.libncx_model is None:
            cls.libncx_model = RwYang.model_create_libncx()
            cls.libncx_model.load_schema_ypbc(self.yang_namespace.get_schema())

        return cls.libncx_model

    def _check_pb_msg_type(self, pb_msg):
        # Shared precondition for all to_*_string serializers
        if not isinstance(pb_msg, self._yang_pb_cls):
            raise TypeError("Invalid protobuf message type provided")

    def _from_xml_file_hdl(self, file_hdl):
        # Local renamed from "xml" to avoid shadowing a module name
        contents = file_hdl.read()

        return self.yang_class.from_xml_v2(self.model, decode(contents), strict=False)

    def _from_json_file_hdl(self, file_hdl):
        # Local renamed from "json" to avoid shadowing the json module
        contents = file_hdl.read()

        return self.yang_class.from_json(self.model, decode(contents), strict=False)

    def _from_yaml_file_hdl(self, file_hdl):
        # Local renamed from "yaml" to avoid shadowing a module name
        contents = file_hdl.read()

        return self.yang_class.from_yaml(self.model, decode(contents), strict=False)

    def to_json_string(self, pb_msg):
        """ Serialize a protobuf message into JSON

        Arguments:
            pb_msg - A GI-protobuf object of type provided into constructor

        Returns:
            A JSON string representing the protobuf message

        Raises:
            SerializationError - Message could not be serialized
            TypeError - Incorrect protobuf type provided
        """
        self._check_pb_msg_type(pb_msg)

        try:
            return pb_msg.to_json(self.model)
        except Exception as e:
            raise SerializationError(e) from e

    def to_yaml_string(self, pb_msg):
        """ Serialize a protobuf message into YAML

        Arguments:
            pb_msg - A GI-protobuf object of type provided into constructor

        Returns:
            A YAML string representing the protobuf message

        Raises:
            SerializationError - Message could not be serialized
            TypeError - Incorrect protobuf type provided
        """
        self._check_pb_msg_type(pb_msg)

        try:
            return pb_msg.to_yaml(self.model)
        except Exception as e:
            raise SerializationError(e) from e

    def to_xml_string(self, pb_msg):
        """ Serialize a protobuf message into XML

        Arguments:
            pb_msg - A GI-protobuf object of type provided into constructor

        Returns:
            A XML string representing the protobuf message

        Raises:
            SerializationError - Message could not be serialized
            TypeError - Incorrect protobuf type provided
        """
        self._check_pb_msg_type(pb_msg)

        try:
            return pb_msg.to_xml_v2(self.model)
        except Exception as e:
            raise SerializationError(e) from e

    def from_file_hdl(self, file_hdl, extension):
        """ Returns the deserialized protobuf message from file contents

        This function determines the serialization format based on file extension

        Arguments:
            file_hdl - The file hdl to deserialize (set at pos 0)
            extension - Extension of the file format (second item of os.path.splitext())

        Returns:
            A GI-Proto message of type that was provided into the constructor

        Raises:
            UnknownExtensionError - File extension is not of a known serialization format
            SerializationError - File failed to be deserialized into the protobuf message
        """
        extension_lc = extension.lower()
        extension_map = self._deserialize_extension_method_map()

        if extension_lc not in extension_map:
            raise UnknownExtensionError("Cannot detect message format for %s extension" % extension_lc)

        try:
            # The map holds unbound methods, hence the explicit self
            return extension_map[extension_lc](self, file_hdl)
        except Exception as e:
            raise SerializationError(e) from e

    def to_string(self, pb_msg, extension):
        """ Returns the serialized protobuf message for a particular file extension

        This function determines the serialization format based on file extension

        Arguments:
            pb_msg - A GI-protobuf object of type provided into constructor
            extension - Extension of the file format (second item of os.path.splitext())

        Returns:
            A string with the message serialized in the requested format

        Raises:
            UnknownExtensionError - File extension is not of a known serialization format
            SerializationError - Message failed to be serialized
        """
        extension_lc = extension.lower()
        extension_map = self._serialize_extension_method_map()

        if extension_lc not in extension_map:
            raise UnknownExtensionError("Cannot detect message format for %s extension" % extension_lc)

        try:
            # The map holds unbound methods, hence the explicit self
            return extension_map[extension_lc](self, pb_msg)
        except Exception as e:
            raise SerializationError(e) from e
+
+
class VnfdSerializer(ProtoMessageSerializer):
    """ Creates a serializer for the VNFD descriptor"""
    def __init__(self):
        # Plain (non-RIFT-augmented) VNFD yang namespace and catalog message
        super().__init__(VnfdYang, VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd)
+
+
class NsdSerializer(ProtoMessageSerializer):
    """ Creates a serializer for the NSD descriptor"""
    def __init__(self):
        # Plain (non-RIFT-augmented) NSD yang namespace and catalog message
        super().__init__(NsdYang, NsdYang.YangData_Nsd_NsdCatalog_Nsd)
+
+
class RwVnfdSerializer(ProtoMessageSerializer):
    """ Creates a serializer for the RIFT-augmented VNFD descriptor"""
    def __init__(self):
        # RIFT-augmented (Rw*) VNFD yang namespace and catalog message
        super().__init__(RwVnfdYang, RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd)
+
+
class RwNsdSerializer(ProtoMessageSerializer):
    """ Creates a serializer for the RIFT-augmented NSD descriptor"""
    def __init__(self):
        # RIFT-augmented (Rw*) NSD yang namespace and catalog message
        super().__init__(RwNsdYang, RwNsdYang.YangData_Nsd_NsdCatalog_Nsd)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/icon.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/icon.py
new file mode 100644
index 0000000..1c3d209
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/icon.py
@@ -0,0 +1,96 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import re
+import os.path
+
+from . import package
+
class IconExtractionError(Exception):
    """ Error extracting a package icon.

    NOTE(review): not raised anywhere in this module (extract_icons logs and
    continues on failure) — confirm whether external callers depend on it.
    """
    pass
+
+
class PackageIconExtractor(object):
    """ This class extracts icons to a known location for the UI to access """

    # NOTE(review): evaluated at import time; raises KeyError if the
    # RIFT_INSTALL environment variable is unset.
    DEFAULT_INSTALL_DIR = os.path.join(
            os.environ["RIFT_INSTALL"],
            "usr/share/rw.ui/skyquake/plugins/composer/public/assets/logos"
            )

    # Matches "<prefix>[/]icons/<icon_name>" with no further subdirs
    ICON_REGEX = "{prefix}/?icons/(?P<icon_name>[^/]+)$"

    def __init__(self, log, install_dir=DEFAULT_INSTALL_DIR):
        """
        Arguments:
            log - A logger instance
            install_dir - Root directory icons are extracted under
        """
        self._log = log
        self._install_dir = install_dir

    def _get_rel_dest_path(self, descriptor_type, descriptor_id, icon_name):
        # NOTE: despite the name, this returns the FULL destination path
        # rooted at install_dir.
        dest_path = os.path.join(self._install_dir, descriptor_type, descriptor_id, icon_name)
        return dest_path

    @classmethod
    def package_icon_files(cls, package):
        """ Map icon file names to their paths within the package

        Arguments:
            package - A DescriptorPackage

        Returns:
            A dict of icon file name -> file path within the package
        """
        icon_file_map = {}

        # Compile once instead of re-formatting the pattern per file
        regex = re.compile(cls.ICON_REGEX.format(prefix=package.prefix))
        for file_name in package.files:
            match = regex.match(file_name)
            if match is None:
                continue

            icon_file_map[match.group("icon_name")] = file_name

        return icon_file_map

    def get_extracted_icon_path(self, descriptor_type, descriptor_id, icon_name):
        """ Return the path an icon is (or would be) extracted to """
        return self._get_rel_dest_path(descriptor_type, descriptor_id, icon_name)

    def extract_icons(self, pkg):
        """ Extract any icons in the package to the UI filesystem location

        Failures are logged and skipped; this method does not raise on a
        per-icon extraction error.

        Arguments:
            pkg - A DescriptorPackage
        """
        descriptor_id = pkg.descriptor_id
        icon_files = PackageIconExtractor.package_icon_files(pkg)

        for icon_name, icon_file in icon_files.items():
            # BUG FIX: _get_rel_dest_path already includes install_dir; the
            # previous second join duplicated it when install_dir was relative.
            dest_path = self._get_rel_dest_path(pkg.descriptor_type, descriptor_id, icon_name)

            dest_dir = os.path.dirname(dest_path)
            try:
                os.makedirs(dest_dir, exist_ok=True)
            except OSError as e:
                self._log.error("Failed to create icon directory %s: %s", dest_dir, str(e))
                continue

            self._log.debug("Extracting %s icon to %s", icon_name, dest_path)
            try:
                pkg.extract_file(icon_file, dest_path)
            except package.ExtractError as e:
                # Typo fix in log message ("extact" -> "extract")
                self._log.error("Failed to extract icon %s: %s", icon_name, str(e))
                continue
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/image.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/image.py
new file mode 100644
index 0000000..9b9d17a
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/image.py
@@ -0,0 +1,55 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+
# Package image files live under an "images/" directory and carry a .qcow2
# extension.  BUG FIX: the previous pattern had a stray unescaped '.' before
# "\.qcow2", requiring at least two characters before the extension (so
# "images/a.qcow2" did not match) and even letting the wildcard match a '/'.
IMAGE_REGEX = r"{prefix}/?images/(?P<image_name>[^/]+\.qcow2)$"


def is_image_file(image_path):
    """ Return True if the path matches the package image naming convention

    Arguments:
        image_path - A file path (any prefix is accepted)

    Returns:
        True if the path is an image file, False otherwise
    """
    match = re.match(
            IMAGE_REGEX.format(prefix=".*"),
            image_path,
            )

    return match is not None
+
+
def get_package_image_files(package):
    """ Return a image name/file map for images in the descriptor

    Arguments:
        package - A DescriptorPackage

    Returns:
        A dictionary mapping image names to the relative path within
        the package.
    """
    pattern = re.compile(IMAGE_REGEX.format(prefix=package.prefix))

    image_file_map = {}
    for file_name in package.files:
        match = pattern.match(file_name)
        if match is not None:
            image_file_map[match.group("image_name")] = file_name

    return image_file_map
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/package.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/package.py
new file mode 100644
index 0000000..355b23b
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/package.py
@@ -0,0 +1,658 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import io
+import os
+import re
+import shutil
+import tarfile
+
+from . import checksums
+from . import convert
+from . import image
+
+
class ArchiveError(Exception):
    """ Error handling a package archive (presumably raised by the tar
    handling code later in this module — confirm). """
    pass


class ExtractError(Exception):
    """ Raised when package contents cannot be extracted to disk
    (see DescriptorPackage.extract_dir / extract_file). """
    pass


class PackageError(Exception):
    """ Generic descriptor package error: bad or missing descriptor,
    unknown package type, unknown package file, etc. """
    pass


class PackageValidationError(Exception):
    """ Base class for package validation failures. """
    pass


class PackageFileChecksumError(PackageValidationError):
    """ Checksum validation failure for a single package file.

    Attributes:
        filename - The package file whose checksum did not match
    """
    def __init__(self, filename):
        self.filename = filename
        super().__init__("Checksum mismatch for {}".format(filename))
+
+
+class DescriptorPackage(object):
+    """ This class provides an base class for a descriptor package representing
+
+    A descriptor package is a package which contains a single descriptor and any
+    associated files (logos, charms, scripts, etc).  This package representation
+    attempts to be agnostic as to where the package files are being stored
+    (in memory, on disk, etc).
+
+    The package provides a simple interface to interact with the files within the
+    package and access the contained descriptor.
+    """
+    DESCRIPTOR_REGEX = r"{prefix}({descriptor_type}/[^/]*|[^/]*{descriptor_type})\.(xml|yml|yaml|json)$"
+
    def __init__(self, log, open_fn):
        """
        Arguments:
            log - A logger instance
            open_fn - A function which takes a package file name and returns
                      an open file handle for its contents
        """
        self._log = log
        self._open_fn = open_fn

        # package file name -> permission mode (see get_file_mode)
        self._package_file_mode_map = {}
        # set of directory names contained in the package
        self._package_dirs = set()
+
+    @property
+    def prefix(self):
+        """ Return the leading parent directories shared by all files in the package
+
+        In order to remain flexible as to where tar was invoked to create the package,
+        the prefix represents the common parent directory path which all files in the
+        package have in common.
+        """
+        entries = list(self._package_file_mode_map) + list(self._package_dirs)
+
+        if len(entries) > 1:
+            prefix = os.path.commonprefix(entries)
+            if prefix and not prefix.endswith("/"):
+                prefix += "/"
+        elif len(entries) == 1:
+            entry = entries[0]
+            if "/" in entry:
+                prefix = os.path.dirname(entry) + "/"
+            else:
+                prefix = ""
+        else:
+            prefix = ""
+
+        return prefix
+
+    @property
+    def files(self):
+        """ Return all files (with the prefix) in the package """
+        return list(self._package_file_mode_map)
+
+    @property
+    def dirs(self):
+        """ Return all directories in the package """
+        return list(self._package_dirs)
+
    @property
    def descriptor_type(self):
        """ A shorthand name for the type of descriptor (e.g. nsd)"""
        # Abstract: concrete subclasses (e.g. NsdPackage, VnfdPackage) provide
        # the actual value.
        raise NotImplementedError("Subclass must implement this property")
+
    @property
    def serializer(self):
        """ An instance of convert.ProtoMessageSerializer """
        # Abstract: concrete subclasses provide the serializer matching their
        # descriptor type.
        raise NotImplementedError("Subclass must implement this property")
+
+    @property
+    def descriptor_file(self):
+        """ The descriptor file name (with prefix) """
+        regex = self.__class__.DESCRIPTOR_REGEX.format(
+                descriptor_type=self.descriptor_type,
+                prefix=self.prefix,
+                )
+        desc_file = None
+        for filename in self.files:
+            if re.match(regex, filename):
+                if desc_file is not None:
+                    raise PackageError("Package contains more than one descriptor")
+                desc_file = filename
+
+        if desc_file is None:
+            raise PackageError("Could not find descriptor file in package")
+
+        return desc_file
+
+    @property
+    def descriptor_msg(self):
+        """ The proto-GI descriptor message """
+        filename = self.descriptor_file
+        with self.open(filename) as hdl:
+            _, ext = os.path.splitext(filename)
+            nsd = self.serializer.from_file_hdl(hdl, ext)
+            return nsd
+
+    @property
+    def json_descriptor(self):
+        """  The JSON serialized descriptor message"""
+        nsd = self.descriptor_msg
+        return self.serializer.to_json_string(nsd)
+
+    @property
+    def descriptor_id(self):
+        """  The descriptor id which uniquely identifies this descriptor in the system """
+        if not self.descriptor_msg.has_field("id"):
+            msg = "Descriptor must have an id field"
+            self._log.error(msg)
+            raise PackageError(msg)
+
+        return self.descriptor_msg.id
+
+    @classmethod
+    def get_descriptor_patterns(cls):
+        """ Returns a tuple of descriptor regex and Package Types  """
+        package_types = (VnfdPackage, NsdPackage)
+        patterns = []
+
+        for pkg_cls in package_types:
+            regex = cls.DESCRIPTOR_REGEX.format(
+                    descriptor_type=pkg_cls.DESCRIPTOR_TYPE,
+                    prefix=".*"
+                    )
+
+            patterns.append((regex, pkg_cls))
+
+        return patterns
+
+    @classmethod
+    def from_package_files(cls, log, open_fn, files):
+        """ Creates a new DescriptorPackage subclass instance from a list of files
+
+        This classmethod detects the Package type from the package contents
+        and returns a new Package instance.
+
+        This will NOT subsequently add the files to the package so that must
+        be done by the client
+
+        Arguments:
+            log - A logger
+            open_fn - A function which can take a file name and mode and return
+                      a file handle.
+            files - A list of files which would be added to the package after
+                    intantiation
+
+        Returns:
+            A new DescriptorPackage subclass of the correct type for the descriptor
+
+        Raises:
+            PackageError - Package type could not be determined from the list of files.
+        """
+        patterns = cls.get_descriptor_patterns()
+        pkg_cls = None
+        regexes = set()
+        for name in files:
+            for regex, cls in patterns:
+                regexes.add(regex)
+                if re.match(regex, name) is not None:
+                    pkg_cls = cls
+                    break
+
+        if pkg_cls is None:
+            log.error("No file in archive matched known descriptor formats: %s", regexes)
+            raise PackageError("Could not determine package type from contents")
+
+        package = pkg_cls(log, open_fn)
+        return package
+
+    @classmethod
+    def from_descriptor_file_hdl(cls, log, file_hdl):
+        """ Creates a new DescriptorPackage from a descriptor file handle
+
+        The descriptor file is added to the package before returning.
+
+        Arguments:
+            log - A logger
+            file_hdl - A file handle whose name attribute can be recognized as
+                       particular descriptor type.
+
+        Returns:
+            A new DescriptorPackage subclass of the correct type for the descriptor
+
+        Raises:
+            PackageError - Package type could not be determined from the list of files.
+            ValueError - file_hdl did not have a name attribute provided
+        """
+
+        package_types = (VnfdPackage, NsdPackage)
+        filename_patterns = []
+        for package_cls in package_types:
+            filename_patterns.append(
+                    (r".*{}.*".format(package_cls.DESCRIPTOR_TYPE), package_cls)
+                    )
+
+        if not hasattr(file_hdl, 'name'):
+            raise ValueError("File descriptor must have a name attribute to create a descriptor package")
+
+        # Iterate through the recognized patterns and assign files accordingly
+        package_cls = None
+        for pattern, cls in filename_patterns:
+            if re.match(pattern, file_hdl.name):
+                package_cls = cls
+                break
+
+        if not package_cls:
+            raise PackageError("Could not determine package type from file name: %s" % file_hdl.name)
+
+        _, ext = os.path.splitext(file_hdl.name)
+        try:
+            package_cls.SERIALIZER.from_file_hdl(file_hdl, ext)
+        except convert.SerializationError as e:
+            raise PackageError("Could not deserialize descriptor %s" % file_hdl.name) from e
+
+        # Create a new file handle for each open call to prevent independent clients
+        # from affecting each other
+        file_hdl.seek(0)
+        new_hdl = io.BytesIO(file_hdl.read())
+
+        def do_open(file_path):
+            assert file_path == file_hdl.name
+            hdl = io.BytesIO(new_hdl.getvalue())
+            return hdl
+
+        desc_pkg = package_cls(log, do_open)
+        desc_pkg.add_file(file_hdl.name)
+
+        return desc_pkg
+
+    def get_file_mode(self, pkg_file):
+        """ Returns the file mode for the package file
+
+        Arguments:
+            pkg_file - A file name in the package
+
+        Returns:
+            The permission mode
+
+        Raises:
+            PackageError - The file does not exist in the package
+        """
+        try:
+            return self._package_file_mode_map[pkg_file]
+        except KeyError as e:
+            msg = "Could not find package_file: %s" % pkg_file
+            self._log.error(msg)
+            raise PackageError(msg) from e
+
+    def extract_dir(self, src_dir, dest_root_dir, extract_images=False):
+        """ Extract a specific directory contents to dest_root_dir
+
+        Arguments:
+            src_dir - A directory within the package (None means all files/directories)
+            dest_root_dir - A directory to extract directory contents to
+            extract_images - A flag indicating whether we want to extract images
+
+        Raises:
+            ExtractError - Directory contents could not be extracted
+        """
+        if src_dir is not None and src_dir not in self._package_dirs:
+            raise ExtractError("Could not find source dir: %s" % src_dir)
+
+        for filename in self.files:
+            if not extract_images and image.is_image_file(filename):
+                continue
+
+            if src_dir is not None and not filename.startswith(src_dir):
+                continue
+
+            # Copy the contents of the file to the correct path
+            dest_file_path = os.path.join(dest_root_dir, filename)
+            dest_dir_path = os.path.dirname(dest_file_path)
+            if not os.path.exists(dest_dir_path):
+                os.makedirs(dest_dir_path)
+
+            with open(dest_file_path, 'wb') as dst_hdl:
+                with self.open(filename) as src_hdl:
+                    shutil.copyfileobj(src_hdl, dst_hdl, 10 * 1024 * 1024)
+
+                    # Set the file mode to original
+                    os.chmod(dest_file_path, self._package_file_mode_map[filename])
+
+    def extract_file(self, src_file, dest_file):
+        """ Extract a specific package file to dest_file
+
+        The destination directory will be created if it does not exist.
+
+        Arguments:
+            src_file - A file within the package
+            dest_file - A file path to extract file contents to
+
+        Raises:
+            ExtractError - Directory contents could not be extracted
+        """
+        if src_file not in self._package_file_mode_map:
+            msg = "Could not find source file %s" % src_file
+            self._log.error(msg)
+            raise ExtractError(msg)
+
+        # Copy the contents of the file to the correct path
+        dest_dir_path = os.path.dirname(dest_file)
+        if not os.path.isdir(dest_dir_path):
+            os.makedirs(dest_dir_path)
+
+        with open(dest_file, 'wb') as dst_hdl:
+            with self.open(src_file) as src_hdl:
+                shutil.copyfileobj(src_hdl, dst_hdl, 10 * 1024 * 1024)
+
+                # Set the file mode to original
+                os.chmod(dest_file, self._package_file_mode_map[src_file])
+
+    def extract(self, dest_root_dir, extract_images=False):
+        """ Extract all package contents to a destination directory
+
+        Arguments:
+            dest_root_dir - The directory to extract package contents to
+
+        Raises:
+            NotADirectoryError - dest_root_dir is not a directory
+        """
+        if not os.path.isdir(dest_root_dir):
+            raise NotADirectoryError(dest_root_dir)
+
+        self.extract_dir(None, dest_root_dir, extract_images)
+
+    def open(self, rel_path):
+        """ Open a file contained in the package in read-only, binary mode.
+
+        Arguments:
+            rel_path - The file path within the package
+
+        Returns:
+            A file-like object opened in read-only mode.
+
+        Raises:
+            PackageError - The file could not be opened
+        """
+        try:
+            return self._open_fn(rel_path)
+        except Exception as e:
+            msg = "Could not open file from package: %s" % rel_path
+            self._log.warning(msg)
+            raise PackageError(msg) from e
+
+    def add_file(self, rel_path, mode=0o777):
+        """ Add a file to the package.
+
+        The file should be specified as a relative path to the package
+        root.  The open_fn provided in the constructor must be able to
+        take the relative path and open the actual source file from
+        wherever the file actually is stored.
+
+        If the file's parent directories do not yet exist, add them to
+        the package.
+
+        Arguments:
+            rel_path - The file path relative to the top of the package.
+            mode - The permission mode the file should be stored with so
+                   it can be extracted with the correct permissions.
+
+        Raises:
+            PackageError - The file could not be added to the package
+        """
+        if not rel_path:
+            raise PackageError("Empty file name added")
+
+        if rel_path in self._package_file_mode_map:
+            raise PackageError("File %s already exists in package" % rel_path)
+
+        # If the file's directory is not in the package add it.
+        rel_dir = os.path.dirname(rel_path)
+        while rel_dir:
+            self._package_dirs.add(rel_dir)
+            rel_dir = os.path.dirname(rel_dir)
+
+        self._package_file_mode_map[rel_path] = mode
+
+    def add_dir(self, rel_path):
+        """ Add a directory to the package
+
+        Arguments:
+            rel_path - The directories relative path.
+
+        Raises:
+            PackageError - A file already exists in the package with the same name.
+        """
+        if rel_path in self._package_file_mode_map:
+            raise PackageError("File already exists with the same name: %s", rel_path)
+
+        if rel_path in self._package_dirs:
+            self._log.warning("%s directory already exists", rel_path)
+            return
+
+        self._package_dirs.add(rel_path)
+
+
+class NsdPackage(DescriptorPackage):
+    DESCRIPTOR_TYPE = "nsd"
+    SERIALIZER = convert.RwNsdSerializer()
+
+    @property
+    def descriptor_type(self):
+        return "nsd"
+
+    @property
+    def serializer(self):
+        return NsdPackage.SERIALIZER
+
+
+class VnfdPackage(DescriptorPackage):
+    DESCRIPTOR_TYPE = "vnfd"
+    SERIALIZER = convert.RwVnfdSerializer()
+
+    @property
+    def descriptor_type(self):
+        return "vnfd"
+
+    @property
+    def serializer(self):
+        return VnfdPackage.SERIALIZER
+
+
+class PackageChecksumValidator(object):
+    """  This class uses the checksums.txt file in the package
+    and validates that all files in the package match the checksum that exists within
+    the file.
+    """
+    CHECKSUM_FILE = "{prefix}checksums.txt"
+
+    def __init__(self, log):
+        self._log = log
+
+    @classmethod
+    def get_package_checksum_file(cls, package):
+        checksum_file = cls.CHECKSUM_FILE.format(prefix=package.prefix)
+        if checksum_file not in package.files:
+            raise FileNotFoundError("%s does not exist in archive" % checksum_file)
+
+        return checksum_file
+
+    def validate(self, package):
+        """ Validate file checksums match that in the checksums.txt
+
+        Arguments:
+            package - The Descriptor Package which possiblity contains checksums.txt
+
+        Returns: A dictionary of files that were validated by the checksums.txt
+                 along with their checksums
+
+        Raises:
+            PackageValidationError - The package validation failed for some
+              generic reason.
+            PackageFileChecksumError - A file within the package did not match the
+              checksum within checksums.txt
+        """
+        validated_file_checksums = {}
+
+        try:
+            checksum_file = PackageChecksumValidator.get_package_checksum_file(package)
+            with package.open(checksum_file) as checksum_hdl:
+                archive_checksums = checksums.ArchiveChecksums.from_file_desc(checksum_hdl)
+        except (FileNotFoundError, PackageError) as e:
+            self._log.warning("Could not open package checksum file.  Not validating checksums.")
+            return validated_file_checksums
+
+        for pkg_file in package.files:
+            if pkg_file == checksum_file:
+                continue
+
+            pkg_file_no_prefix = pkg_file.replace(package.prefix, "", 1)
+            if pkg_file_no_prefix not in archive_checksums:
+                self._log.warning("File %s not found in checksum file %s",
+                                  pkg_file, checksum_file)
+                continue
+
+            try:
+                with package.open(pkg_file) as pkg_file_hdl:
+                    file_checksum = checksums.checksum(pkg_file_hdl)
+            except PackageError as e:
+                msg = "Could not read package file {} for checksum validation: {}".format(
+                      pkg_file, str(e))
+                self._log.error(msg)
+                raise PackageValidationError(msg) from e
+
+            if archive_checksums[pkg_file_no_prefix] != file_checksum:
+                msg = "{} checksum ({}) did match expected checksum ({})".format(
+                        pkg_file, file_checksum, archive_checksums[pkg_file_no_prefix]
+                        )
+                self._log.error(msg)
+                raise PackageFileChecksumError(pkg_file)
+
+            validated_file_checksums[pkg_file] = file_checksum
+
+        return validated_file_checksums
+
+
+class TarPackageArchive(object):
+    """  This class represents a package stored within a tar.gz archive file """
+    def __init__(self, log, tar_file_hdl, mode="r"):
+        self._log = log
+        self._tar_filepath = tar_file_hdl
+        self._tar_infos = {}
+
+        self._tarfile = tarfile.open(fileobj=tar_file_hdl, mode=mode)
+
+        self._load_archive()
+
+    def __repr__(self):
+        return "TarPackageArchive(%s)" % self._tar_filepath
+
+    def _get_members(self):
+        return [info for info in self._tarfile.getmembers()]
+
+    def _load_archive(self):
+        self._tar_infos = {info.name: info for info in self._get_members() if info.name}
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        """ Close the opened tarfile"""
+        if self._tarfile is not None:
+            self._tarfile.close()
+            self._tarfile = None
+
+    @property
+    def filenames(self):
+        """ The list of file members within the tar file """
+        return [name for name in self._tar_infos if tarfile.TarInfo.isfile(self._tar_infos[name])]
+
+    def open_file(self, rel_file_path):
+        """ Opens a file within the archive as read-only, byte mode.
+
+        Arguments:
+            rel_file_path - The file path within the archive to open
+
+        Returns:
+            A file like object (see tarfile.extractfile())
+
+        Raises:
+            ArchiveError - The file could not be opened for some generic reason.
+        """
+        if rel_file_path not in self._tar_infos:
+            raise ArchiveError("Could not find %s in tar file", rel_file_path)
+
+        try:
+            return self._tarfile.extractfile(rel_file_path)
+        except tarfile.TarError as e:
+            msg = "Failed to read file {} from tarfile {}: {}".format(
+                  rel_file_path, self._tar_filepath, str(e)
+                  )
+            self._log.error(msg)
+            raise ArchiveError(msg) from e
+
+    def create_package(self):
+        """  Creates a Descriptor package from the archive contents
+
+        Returns:
+            A DescriptorPackage of the correct descriptor type
+        """
+        package = DescriptorPackage.from_package_files(self._log, self.open_file, self.filenames)
+        for pkg_file in self.filenames:
+            package.add_file(pkg_file, self._tar_infos[pkg_file].mode)
+
+        return package
+
+
+class TemporaryPackage(object):
+    """  This class is a container for a temporary file-backed package
+
+    This class contains a DescriptorPackage and can be used in place of one.
+    Provides a useful context manager which will close and destroy the file
+    that is backing the DescriptorPackage on exit.
+    """
+    def __init__(self, log, package, file_hdl):
+        self._log = log
+        self._package = package
+        self._file_hdl = file_hdl
+
+        if not hasattr(self._file_hdl, "name"):
+            raise ValueError("File handle must have a name attribute")
+
+    def __getattr__(self, attr):
+        return getattr(self._package, attr)
+
+    def __enter__(self):
+        return self._package
+
+    def __exit__(self, type, value, tb):
+        self.close()
+
+    def filename(self):
+        """ Returns the filepath with is backing the Package """
+        return self._file_hdl.name
+
+    def package(self):
+        """ The contained DescriptorPackage instance """
+        return self._package
+
+    def close(self):
+        """ Close and remove the backed file """
+        filename = self._file_hdl.name
+
+        try:
+            self._file_hdl.close()
+        except OSError as e:
+            self._log.warning("Failed to close package file: %s", str(e))
+
+        try:
+            os.remove(filename)
+        except OSError as e:
+            self._log.warning("Failed to remove package file: %s", str(e))
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/script.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/script.py
new file mode 100644
index 0000000..01f66b0
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/script.py
@@ -0,0 +1,84 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import re
+import os.path
+
+from . import package
+
+
+class ScriptExtractionError(Exception):
+    """ Raised when a script could not be extracted from a package """
+    pass
+
+
+class PackageScriptExtractor(object):
+    """ This class is reponsible for extracting scripts to the correct directory
+
+    In order to remain compatible with the existing config manager, we extract the scripts
+    to a known location (RIFT-13282)
+    """
+    DEFAULT_INSTALL_DIR = os.path.join(
+            os.environ["RIFT_INSTALL"],
+            "usr/bin"
+            )
+
+    SCRIPT_REGEX = "{prefix}/?scripts/(?P<script_name>[^/]+)$"
+
+    def __init__(self, log, install_dir=DEFAULT_INSTALL_DIR):
+        self._log = log
+        self._install_dir = install_dir
+
+    def _get_rel_dest_path(self, descriptor_id, script_name):
+        dest_path = os.path.join(self._install_dir, script_name)
+        return dest_path
+
+    @classmethod
+    def package_script_files(cls, package):
+        script_file_map = {}
+
+        for file_name in package.files:
+            match = re.match(
+                    cls.SCRIPT_REGEX.format(prefix=package.prefix),
+                    file_name,
+                    )
+            if match is None:
+                continue
+
+            script_name = match.group("script_name")
+
+            script_file_map[script_name] = file_name
+
+        return script_file_map
+
+    def get_extracted_script_path(self, package_id, script_name):
+        return os.path.join(
+                self._get_rel_dest_path(package_id, script_name),
+                )
+
+    def extract_scripts(self, pkg):
+        descriptor_id = pkg.descriptor_id
+        script_files = PackageScriptExtractor.package_script_files(pkg)
+
+        for script_name, script_file in script_files.items():
+            dest_path = self._get_rel_dest_path(descriptor_id, script_name)
+
+            self._log.debug("Extracting %s script to %s", script_name, dest_path)
+            try:
+                pkg.extract_file(script_file, dest_path)
+            except package.ExtractError as e:
+                raise ScriptExtractionError("Failed to extract script %s" % script_name) from e
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py
new file mode 100644
index 0000000..454546d
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py
@@ -0,0 +1,211 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import os
+import shutil
+
+from . import package
+
+
+class PackageStoreError(Exception):
+    """ Base error for package store operations """
+    pass
+
+
+class PackageExistsError(PackageStoreError):
+    """ Raised when storing a package whose descriptor id already exists """
+    pass
+
+
+class PackageNotFoundError(PackageStoreError):
+    """ Raised when a requested package id is not in the store """
+    pass
+
+
+class PackageFilesystemStore(object):
+    """ This class is able to store/retreive/delete DescriptorPackages on disk
+
+    To insulate components from having to deal with accessing the filesystem directly
+    to deal with onboarded packages, this class provides a convenient interface for
+    storing, retreiving, deleting packages stored on disk.  The interfaces deal directly
+    with DescriptorPackages so clients are abstracted from the actual location on disk.
+    """
+
+    def __init__(self, log, root_dir):
+        self._log = log
+        self._root_dir = root_dir
+        self._package_dirs = {}
+
+        self.refresh()
+
+    def _get_package_dir(self, package_id):
+        return os.path.join(self._root_dir, package_id)
+
+    def _get_package_files(self, package_id):
+        package_files = {}
+
+        package_dir = self._get_package_dir(package_id)
+
+        for dirpath, dirnames, filenames in os.walk(package_dir):
+            for name in filenames:
+                file_path = os.path.join(dirpath, name)
+                file_rel_path = os.path.relpath(file_path, package_dir)
+                package_files[file_rel_path] = file_path
+
+        return package_files
+
+    def refresh(self):
+        """ Refresh the package index from disk  """
+        if not os.path.exists(self._root_dir):
+            self._package_dirs = {}
+            return
+
+        package_dirs = {}
+        for package_id_dir in os.listdir(self._root_dir):
+            try:
+                package_dir_path = os.path.join(self._root_dir, package_id_dir)
+                if not os.path.isdir(package_dir_path):
+                    self._log.warning("Unknown file in package store: %s", package_dir_path)
+                    continue
+
+                files = os.listdir(package_dir_path)
+                if len(files) == 0:
+                    self._log.warning("Package directory %s is empty", package_dir_path)
+                    continue
+
+                package_id = os.path.basename(package_id_dir)
+                package_dirs[package_id] = package_id_dir
+
+            except OSError as e:
+                self._log.warning("Failed to read packages from %s: %s",
+                                  package_dir_path, str(e))
+
+        self._package_dirs = package_dirs
+
+    def get_package(self, package_id):
+        """ Get a DescriptorPackage on disk from the package descriptor id
+
+        Arguments:
+            package_id - The DescriptorPackage.descriptor_id
+
+        Returns:
+            A DescriptorPackage instance of the correct type
+
+        Raises:
+            PackageStoreError- The package could not be retrieved
+        """
+        if package_id not in self._package_dirs:
+            msg = "Package %s not found in %s" % (package_id, self._root_dir)
+            raise PackageStoreError(msg)
+
+        package_files = self._get_package_files(package_id)
+        package_dir = self._get_package_dir(package_id)
+
+        def do_open(pkg_file):
+            pkg_path = os.path.join(package_dir, pkg_file)
+            return open(pkg_path, "rb")
+
+        pkg = package.DescriptorPackage.from_package_files(self._log, do_open, package_files)
+        for pkg_file in package_files:
+            pkg.add_file(pkg_file)
+
+        return pkg
+
+    def store_package(self, pkg):
+        """ Store a DescriptorPackage to disk
+
+        Arguments:
+            pkg - A DescriptorPackage
+
+        Raises:
+            PackageStoreError - The package could not be stored
+        """
+        if pkg.descriptor_id in self._package_dirs:
+            raise PackageExistsError("Package %s already exists", pkg.descriptor_id)
+
+        package_dir = self._get_package_dir(pkg.descriptor_id)
+
+        try:
+            os.makedirs(package_dir, exist_ok=True)
+        except OSError as e:
+            raise PackageStoreError("Failed to create package dir: %s", package_dir) from e
+
+        try:
+            self._log.debug("Storing package in dir %s", package_dir)
+            pkg.extract(package_dir)
+            self._log.debug("Package stored in dir %s", package_dir)
+        except pkg.PackageError as e:
+            raise PackageStoreError("Failed to extract package to package store") from e
+
+        self._package_dirs[pkg.descriptor_id] = package_dir
+
+    def delete_package(self, descriptor_id):
+        """ Delete a stored DescriptorPackage
+
+        Arguments:
+            descriptor_id - The DescriptorPackage.descriptor_id
+
+        Raises:
+            PackageNotFoundError - The package could not be found
+            PackageStoreError - The package could not be deleted
+        """
+
+        if descriptor_id not in self._package_dirs:
+            raise PackageNotFoundError("Package %s does not exists", descriptor_id)
+
+        package_dir = self._get_package_dir(descriptor_id)
+        try:
+            if os.path.exists(package_dir):
+                self._log.debug("Removing stored package directory: %s", package_dir)
+                shutil.rmtree(package_dir)
+        except OSError as e:
+            raise PackageStoreError(
+                    "Failed to remove stored package directory: %s", package_dir
+                    ) from e
+
+        del self._package_dirs[descriptor_id]
+
+    def update_package(self, pkg):
+        """ Update a stored DescriptorPackage
+
+        Arguments:
+            pkg - A DescriptorPackage
+
+        Raises:
+            PackageNotFoundError - The package could not be found
+            PackageStoreError - The package could not be deleted
+        """
+        self.delete_package(pkg.descriptor_id)
+        self.store_package(pkg)
+
+
+class NsdPackageFilesystemStore(PackageFilesystemStore):
+    """ A PackageFilesystemStore rooted at the default NSD package location """
+    # Resolved at import time; requires RIFT_ARTIFACTS to be set in the environment
+    DEFAULT_ROOT_DIR = os.path.join(
+            os.environ["RIFT_ARTIFACTS"],
+            "launchpad", "packages", "nsd"
+            )
+
+    def __init__(self, log, root_dir=DEFAULT_ROOT_DIR):
+        super().__init__(log, root_dir)
+
+
+class VnfdPackageFilesystemStore(PackageFilesystemStore):
+    """ A PackageFilesystemStore rooted at the default VNFD package location """
+    # Resolved at import time; requires RIFT_ARTIFACTS to be set in the environment
+    DEFAULT_ROOT_DIR = os.path.join(
+            os.environ["RIFT_ARTIFACTS"],
+            "launchpad", "packages", "vnfd"
+            )
+
+    def __init__(self, log, root_dir=DEFAULT_ROOT_DIR):
+        super().__init__(log, root_dir)
+
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py
new file mode 100644
index 0000000..4c17a07
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py
@@ -0,0 +1 @@
+from .tasklet import LaunchpadTasklet
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert_pkg.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert_pkg.py
new file mode 100644
index 0000000..ba04e8f
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert_pkg.py
@@ -0,0 +1,102 @@
+############################################################################
+# Copyright 2016 RIFT.IO Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
+
+
+import os
+import shutil
+import tempfile
+
+from .tosca import ImportTosca
+
+
+class ConvertPackageError(Exception):
+    """ Raised when converting an uploaded package fails """
+    pass
+
+
+class ConvertPackage(object):
+    """Convert a package to our YANG model
+
+    Currently only TOSCA to our model is supported
+    """
+
+    def __init__(self, log, filename, pkgfile):
+        # filename: original (uploaded) file name, used only in log messages
+        # pkgfile: on-disk path of the package file to convert
+        self._log = log
+        self._filename = filename
+        self._pkgfile = pkgfile
+        # Set to the temporary output directory once a conversion succeeded
+        self._tempdir = None
+
+    def convert(self, delete=False):
+        """Convert package to our YANG model
+
+        Arguments:
+          delete: If the pkgfile is to be deleted after converting
+            NOTE(review): currently unused -- the input file is removed
+            whenever conversion succeeds; confirm intended behavior.
+
+        Returns:
+          List of converted descriptor package paths. If the package is
+          not a supported format, the list contains only the original
+          package file path.
+
+        Note:
+          This will create a temporary directory and the converted
+          files will be in that. The converted files and directory
+          need to be deleted after use.
+        """
+
+        # Create a temporary directory to store the converted packages
+        tempdir = tempfile.mkdtemp()
+
+        out_files = []
+        converted = False
+        # Check if this is a tosca archive
+        if ImportTosca.is_tosca_package(self._pkgfile):
+            self._log.debug("Uploaded file {} is a TOSCA archive".
+                            format(self._filename))
+            try:
+                tosca = ImportTosca(self._log, self._pkgfile, out_dir=tempdir)
+                out_files = tosca.translate()
+                converted = True
+
+            except Exception as e:
+                self._log.error("Exception converting package from TOSCA {}: {}".
+                                format(self._filename, e))
+
+                # Remove the tempdir
+                try:
+                    shutil.rmtree(tempdir)
+                except OSError as e:
+                    self._log.warning("Unable to remove temporary directory {}: {}".
+                                      format(tempdir, e))
+
+                # Bare raise re-raises the original conversion error; after the
+                # inner handler exits, the outer exception is back in flight.
+                raise
+
+        # Delete the input file, if converted
+        if converted:
+            self._tempdir = tempdir
+            try:
+                os.remove(self._pkgfile)
+            except OSError as e:
+                self._log.warning("Failed to remove package file: %s", str(e))
+        else:
+            # Remove the temp dir
+            shutil.rmtree(tempdir, ignore_errors=True)
+
+            #Return the input file
+            out_files.append(self._pkgfile)
+
+
+        # Return the converted files
+        self._log.debug("Converted package files: {}".format(out_files))
+        return out_files
+
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py
new file mode 100644
index 0000000..84fddb6
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py
@@ -0,0 +1,132 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+
+from gi.repository import (
+    RwDts,
+    RwLaunchpadYang,
+)
+
+import rift.openmano.openmano_client as openmano_client
+import rift.tasklets
+
+
+class DataCenterPublisher(object):
+    """
+    This class is responsible for exposing the data centers associated with an
+    openmano cloud account.
+    """
+
+    # DTS xpath this publisher registers on
+    XPATH = "D,/rw-launchpad:datacenters"
+
+    def __init__(self, tasklet):
+        """Creates an instance of a DataCenterPublisher
+
+        Arguments:
+            tasklet - the tasklet that this publisher is registered for
+
+        """
+        self.tasklet = tasklet
+        # DTS registration handle; set by register()
+        self.reg = None
+
+    @property
+    def dts(self):
+        """The DTS instance used by this tasklet"""
+        return self.tasklet.dts
+
+    @property
+    def log(self):
+        """The logger used by this tasklet"""
+        return self.tasklet.log
+
+    @property
+    def loop(self):
+        """The event loop used by this tasklet"""
+        return self.tasklet.loop
+
+    @property
+    def accounts(self):
+        """The known openmano cloud accounts"""
+        # Only openmano-type accounts carry data center information
+        accounts = list()
+        for acc in self.tasklet.cloud_accounts:
+            if acc.account_type == "openmano":
+                accounts.append(acc.account_msg)
+
+        return accounts
+
+    @asyncio.coroutine
+    def register(self):
+        """Registers the publisher with DTS"""
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            # Called by DTS when a read of the datacenters xpath is prepared
+            try:
+                # Create a datacenters instance to hold all of the cloud
+                # account data.
+                datacenters = RwLaunchpadYang.DataCenters()
+
+                # Iterate over the known openmano accounts and populate cloud
+                # account instances with the corresponding data center info
+                for account in self.accounts:
+                    try:
+                        cloud_account = RwLaunchpadYang.CloudAccount()
+                        cloud_account.name = account.name
+
+                        # Create a client for this cloud account to query for
+                        # the associated data centers
+                        client = openmano_client.OpenmanoCliAPI(
+                                self.log,
+                                account.openmano.host,
+                                account.openmano.port,
+                                account.openmano.tenant_id,
+                                )
+
+                        # Populate the cloud account with the data center info
+                        for uuid, name in client.datacenter_list():
+                            cloud_account.datacenters.append(
+                                    RwLaunchpadYang.DataCenter(
+                                        uuid=uuid,
+                                        name=name,
+                                        )
+                                    )
+
+                        datacenters.cloud_accounts.append(cloud_account)
+
+                    except Exception as e:
+                        # A failing account is logged and skipped so the
+                        # remaining accounts can still be reported
+                        self.log.exception(e)
+
+                # Send the populated message with MORE, then terminate the
+                # response with an ACK carrying no further data
+                xact_info.respond_xpath(
+                        RwDts.XactRspCode.MORE,
+                        'D,/rw-launchpad:datacenters',
+                        datacenters,
+                        )
+
+                xact_info.respond_xpath(RwDts.XactRspCode.ACK)
+
+            except Exception as e:
+                self.log.exception(e)
+                raise
+
+        handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+
+        with self.dts.group_create() as group:
+            self.reg = group.register(
+                    xpath=DataCenterPublisher.XPATH,
+                    handler=handler,
+                    flags=RwDts.Flag.PUBLISHER,
+                    )
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py
new file mode 100644
index 0000000..4256765
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py
@@ -0,0 +1,414 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import io
+import os.path
+import stat
+import time
+import uuid
+
+import tornado.web
+
+import rift.package.archive
+import rift.package.checksums
+import rift.package.package
+import rift.package.store
+import rift.package.image
+
+from . import state
+from . import message
+from . import tosca
+
+import gi
+gi.require_version('NsdYang', '1.0')
+gi.require_version('VnfdYang', '1.0')
+
+from gi.repository import (
+        NsdYang,
+        VnfdYang,
+        )
+
+
# Status messages emitted during the export process.  Each message pairs a
# stable machine-readable name with a human-readable description (see
# message.py for the Message hierarchy).

class ExportStart(message.StatusMessage):
    """ Marks the beginning of an export operation. """
    def __init__(self):
        super().__init__("export-started", "export process started")


class ExportSuccess(message.StatusMessage):
    """ Marks successful completion of an export operation. """
    def __init__(self):
        super().__init__("export-success", "export process successfully completed")


class ExportFailure(message.StatusMessage):
    """ Marks failure of an export operation. """
    def __init__(self):
        super().__init__("export-failure", "export process failed")
+
+
class ExportError(message.ErrorMessage):
    """ Generic error reported during the export process.

    The message name was previously "update-error", which appears to be a
    copy/paste from the update message classes in message.py; an export
    failure should carry an export-specific name so clients can tell the
    two flows apart.  NOTE(review): if any UI keys on the literal
    "update-error" string for export failures, it must be updated in step.
    """
    def __init__(self, msg):
        super().__init__("export-error", msg)
+
+
class ExportSingleDescriptorOnlyError(ExportError):
    """ Raised when an export request names more than one descriptor id. """
    def __init__(self):
        super().__init__("Only a single descriptor can be exported")


class ArchiveExportError(Exception):
    """ Raised when the export archive could not be created. """
    pass
+
+
class DescriptorPackageArchiveExporter(object):
    """ Creates export archives (.tar.gz) from onboarded descriptor packages,
    substituting an updated descriptor (and a recomputed checksum entry) into
    the archive on the fly.
    """

    def __init__(self, log):
        """
        Arguments:
            log - A logger instance
        """
        self._log = log

    def _create_archive_from_package(self, archive_hdl, package, open_fn):
        """ Create a TarPackageArchive from package, reading file contents
        through open_fn instead of the package's own open method.

        Arguments:
            archive_hdl - An open file handle with 'wb' permissions
            package - A DescriptorPackage instance
            open_fn - callable taking a package-relative path and returning an
                      open file handle for that path

        Returns:
            A rift.package.archive.TarPackageArchive
        """
        orig_open = package.open
        try:
            # Temporarily monkey-patch the package's open method so the
            # archiver picks up the rewritten file contents.
            package.open = open_fn
            archive = rift.package.archive.TarPackageArchive.from_package(
                    self._log, package, archive_hdl
                    )
            return archive
        finally:
            # Always restore the original open method, even on failure.
            package.open = orig_open

    def create_archive(self, archive_hdl, package, desc_json_str, serializer):
        """ Create a package archive from an existing package, descriptor messages,
            and a destination serializer.

        In order to stay flexible with the package directory structure and
        descriptor format, attempt to "augment" the onboarded package with the
        updated descriptor in the original format.  If the original package
        contained a checksum file, then recalculate the descriptor checksum.

        Arguments:
            archive_hdl - An open file handle with 'wb' permissions
            package - A DescriptorPackage instance
            desc_json_str - A descriptor (e.g. nsd, vnfd) json message string
            serializer - A destination serializer (e.g. VnfdSerializer)

        Returns:
            A TarPackageArchive

        Raises:
            ArchiveExportError - The exported archive failed to create

        """
        # Re-serialize the incoming json descriptor into the package's
        # original descriptor file format (derived from its extension).
        new_desc_msg = serializer.from_file_hdl(io.BytesIO(desc_json_str.encode()), ".json")
        _, dest_ext = os.path.splitext(package.descriptor_file)
        new_desc_hdl = io.BytesIO(serializer.to_string(new_desc_msg, dest_ext).encode())
        # NOTE(review): assumes checksums.checksum() rewinds/restores the
        # stream position of new_desc_hdl before the archiver reads it -- confirm.
        descriptor_checksum = rift.package.checksums.checksum(new_desc_hdl)

        # The package may legitimately lack a checksum file; in that case
        # checksum_file stays None and no checksum rewriting occurs.
        checksum_file = None
        try:
            checksum_file = rift.package.package.PackageChecksumValidator.get_package_checksum_file(
                    package
                    )

        except FileNotFoundError:
            pass

        # Since we're going to intercept the open function to rewrite the descriptor
        # and checksum, save a handle to use below
        open_fn = package.open

        def create_checksum_file_hdl():
            # Read the original checksum file and patch in the new descriptor
            # checksum before handing the contents to the archiver.
            with open_fn(checksum_file) as checksum_hdl:
                archive_checksums = rift.package.checksums.ArchiveChecksums.from_file_desc(
                        checksum_hdl
                        )

            archive_checksums[package.descriptor_file] = descriptor_checksum

            checksum_hdl = io.BytesIO(archive_checksums.to_string().encode())
            return checksum_hdl

        def open_wrapper(rel_path):
            """ Wraps the package open in order to rewrite the descriptor file and checksum """
            if rel_path == package.descriptor_file:
                return new_desc_hdl

            elif rel_path == checksum_file:
                return create_checksum_file_hdl()

            return open_fn(rel_path)

        archive = self._create_archive_from_package(archive_hdl, package, open_wrapper)

        return archive

    def export_package(self, package, export_dir, file_id, json_desc_str, dest_serializer):
        """ Export package as an archive to the export directory

        Arguments:
            package - A DescriptorPackage instance
            export_dir - The directory to export the package archive to
            file_id - A unique file id to name the archive as (i.e. <file_id>.tar.gz)
            json_desc_str - A descriptor (e.g. nsd, vnfd) json message string
            dest_serializer - A destination serializer (e.g. VnfdSerializer)

        Returns:
            The created archive path

        Raises:
            ArchiveExportError - The exported archive failed to create
        """
        # exist_ok=True already tolerates a pre-existing directory, so the
        # previous try/except FileExistsError wrapper was redundant.
        os.makedirs(export_dir, exist_ok=True)

        archive_path = os.path.join(export_dir, file_id + ".tar.gz")
        with open(archive_path, 'wb') as archive_hdl:
            try:
                self.create_archive(
                    archive_hdl, package, json_desc_str, dest_serializer
                    )
            except Exception as e:
                # Don't leave a partially-written archive behind.
                os.remove(archive_path)
                msg = "Failed to create exported archive"
                self._log.error(msg)
                raise ArchiveExportError(msg) from e

        return archive_path
+
+
class ExportHandler(tornado.web.RequestHandler):
    """ Tornado handler implementing descriptor export (GET /export/<desc_type>).

    Exports a single onboarded descriptor (nsd or vnfd) as a downloadable
    archive, either in the rift/mano yang grammar (.tar.gz) or TOSCA (.zip).
    """

    def options(self, *args, **kargs):
        # CORS preflight: respond with just the default headers.
        pass

    def set_default_headers(self):
        # Permissive CORS headers so a UI served from another origin can
        # invoke the export endpoints.
        self.set_header('Access-Control-Allow-Origin', '*')
        self.set_header('Access-Control-Allow-Headers',
                        'Content-Type, Cache-Control, Accept, X-Requested-With, Authorization')
        self.set_header('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE')

    def initialize(self, log, loop, store_map, exporter, catalog_map):
        """ Called by tornado with the arguments supplied in the url spec.

        Arguments:
            log - logger to wrap for per-transaction message recording
            loop - the asyncio event loop
            store_map - maps descriptor type ("nsd"/"vnfd") to a package store
            exporter - a DescriptorPackageArchiveExporter
            catalog_map - maps descriptor type to {descriptor id: descriptor msg}
        """
        self.loop = loop
        # The transaction id doubles as the exported archive's file id and as
        # the key for this request's message log on the application.
        self.transaction_id = str(uuid.uuid4())
        self.log = message.Logger(
                log,
                self.application.messages[self.transaction_id],
                )
        self.store_map = store_map
        self.exporter = exporter
        self.catalog_map = catalog_map

    def get(self, desc_type):
        """ Export the single descriptor named by the "ids" query argument.

        Responds with JSON carrying the transaction id and the filename under
        which the generated archive can be downloaded.
        """
        if desc_type not in self.catalog_map:
            raise tornado.web.HTTPError(400, "unknown descriptor type: {}".format(desc_type))

        self.log.message(ExportStart())

        # Parse the IDs
        ids_query = self.get_query_argument("ids")
        ids = [id.strip() for id in ids_query.split(',')]
        if len(ids) != 1:
            # MessageException requires a Message *instance*; the original
            # code passed the class object, which made MessageException's
            # isinstance() check raise ValueError instead.
            raise message.MessageException(ExportSingleDescriptorOnlyError())
        desc_id = ids[0]

        catalog = self.catalog_map[desc_type]

        if desc_id not in catalog:
            raise tornado.web.HTTPError(400, "unknown descriptor id: {}".format(desc_id))

        desc_msg = catalog[desc_id]

        # Get the schema for exporting
        schema = self.get_argument("schema", default="rift")

        # Get the grammar for exporting
        grammar = self.get_argument("grammar", default="osm")

        # Get the format for exporting
        format_ = self.get_argument("format", default="yaml")

        # Both branches assign a filename, so the previous "filename is None"
        # fallback response was dead code.
        if grammar == 'tosca':
            filename = "{}.zip".format(self.transaction_id)
            self.export_tosca(schema, format_, desc_type, desc_id, desc_msg)
        else:
            filename = "{}.tar.gz".format(self.transaction_id)
            self.export_rift(schema, format_, desc_type, desc_id, desc_msg)

        self.log.message(message.FilenameMessage(filename))
        self.log.message(ExportSuccess())

        self.write(tornado.escape.json_encode({
            "transaction_id": self.transaction_id,
            "filename": filename,
        }))

    def export_rift(self, schema, format_, desc_type, desc_id, desc_msg):
        """ Export the descriptor as a yang-modeled package archive (.tar.gz)

        Arguments:
            schema - destination schema name ("rift" or "mano")
            format_ - requested output format (only "yaml" is supported)
            desc_type - "nsd" or "vnfd"
            desc_id - the descriptor id (used to look up a stored package)
            desc_msg - the descriptor message taken from the catalog
        """
        # NOTE(review): rift.package.convert is not imported at the top of
        # this module; this attribute access only works if a sibling import
        # (e.g. rift.package.store) pulls in the convert submodule -- confirm.
        convert = rift.package.convert
        schema_serializer_map = {
                "rift": {
                    "vnfd": convert.RwVnfdSerializer,
                    "nsd": convert.RwNsdSerializer,
                    },
                "mano": {
                    "vnfd": convert.VnfdSerializer,
                    "nsd": convert.NsdSerializer,
                    }
                }

        if schema not in schema_serializer_map:
            raise tornado.web.HTTPError(400, "unknown schema: {}".format(schema))

        if format_ != "yaml":
            self.log.warn("Only yaml format supported for export")

        if desc_type not in schema_serializer_map[schema]:
            raise tornado.web.HTTPError(400, "unknown descriptor type: {}".format(desc_type))

        # Use the rift superset schema as the source
        src_serializer = schema_serializer_map["rift"][desc_type]()

        dest_serializer = schema_serializer_map[schema][desc_type]()

        package_store = self.store_map[desc_type]

        # Attempt to get the package from the package store
        # If that fails, create a temporary package using the descriptor only
        try:
            package = package_store.get_package(desc_id)
        except rift.package.store.PackageNotFoundError:
            self.log.debug("stored package not found.  creating package from descriptor config")

            desc_yaml_str = src_serializer.to_yaml_string(desc_msg)
            with io.BytesIO(desc_yaml_str.encode()) as hdl:
                hdl.name = "{}__{}.yaml".format(desc_msg.id, desc_type)
                package = rift.package.package.DescriptorPackage.from_descriptor_file_hdl(
                    self.log, hdl
                    )

        self.exporter.export_package(
                package=package,
                export_dir=self.application.export_dir,
                file_id=self.transaction_id,
                json_desc_str=src_serializer.to_json_string(desc_msg),
                dest_serializer=dest_serializer,
                )

    def export_tosca(self, schema, format_, desc_type, desc_id, desc_msg):
        """ Export desc_msg (an NSD) and its constituent VNFDs as a TOSCA .zip

        Bug fix: the parameter order was previously (format_, schema, ...)
        while the caller in get() passes (schema, format_, ...), so the two
        arguments arrived swapped and the yaml-format check actually tested
        the schema value.  The signature now matches export_rift and the
        call site.  (schema is currently unused for TOSCA export.)
        """
        if format_ != "yaml":
            self.log.warn("Only yaml format supported for TOSCA export")

        if desc_type != "nsd":
            raise tornado.web.HTTPError(
                400,
                "NSD need to passed to generate TOSCA: {}".format(desc_type))

        def get_pkg_from_store(id_, type_):
            package = None
            # Attempt to get the package from the package store
            try:
                package_store = self.store_map[type_]
                package = package_store.get_package(id_)

            except rift.package.store.PackageNotFoundError:
                self.log.debug("stored package not found for {}.".format(id_))
            except rift.package.store.PackageStoreError:
                self.log.debug("stored package error for {}.".format(id_))

            return package

        pkg = tosca.ExportTosca()

        # Add NSD and related descriptors for exporting
        nsd_id = pkg.add_nsd(desc_msg, get_pkg_from_store(desc_id, "nsd"))

        catalog = self.catalog_map["vnfd"]
        for const_vnfd in desc_msg.constituent_vnfd:
            vnfd_id = const_vnfd.vnfd_id_ref
            if vnfd_id in catalog:
                pkg.add_vnfd(nsd_id,
                             catalog[vnfd_id],
                             get_pkg_from_store(vnfd_id, "vnfd"))
            else:
                raise tornado.web.HTTPError(
                    400,
                    "Unknown VNFD descriptor {} for NSD {}".
                    format(vnfd_id, nsd_id))

        # Create the archive.
        pkg.create_archive(self.transaction_id,
                           dest=self.application.export_dir)
+
+
class ExportStateHandler(state.StateHandler):
    """ Binds the export lifecycle stages to their status message classes,
    for consumption by the generic state.StateHandler machinery.
    """
    STARTED = ExportStart
    SUCCESS = ExportSuccess
    FAILURE = ExportFailure
+
+
@asyncio.coroutine
def periodic_export_cleanup(log, loop, export_dir, period_secs=10 * 60, min_age_secs=30 * 60):
    """ Periodically cleanup old exported archives (.tar.gz files) in export_dir

    Runs forever; intended to be scheduled once on the event loop.

    Arguments:
        log - A Logger instance
        loop - A asyncio event loop
        export_dir - The directory to cleanup old archives in
        period_secs - The number of seconds between clean ups
        min_age_secs - The minimum age of a archive to be eligible for cleanup

    """
    log.debug("Starting periodic export cleaning for export directory: %s", export_dir)

    # Create export dir if not created yet.  exist_ok=True closes the race
    # between the previous exists() check and the creation call.
    os.makedirs(export_dir, exist_ok=True)

    while True:
        yield from asyncio.sleep(period_secs, loop=loop)

        # The directory may have been removed while we slept.
        if not os.path.exists(export_dir):
            continue

        for file_name in os.listdir(export_dir):
            # Only generated archives are eligible; leave other files alone.
            if not file_name.endswith(".tar.gz"):
                continue

            file_path = os.path.join(export_dir, file_name)

            try:
                file_stat = os.stat(file_path)
            except OSError as e:
                # File may have been removed concurrently (e.g. downloaded
                # and cleaned up by another actor) -- not fatal.
                log.warning("Could not stat old exported archive: %s", str(e))
                continue

            # st_mtime replaces the legacy tuple-index access
            # (file_stat[stat.ST_MTIME]); same value, clearer, and with
            # sub-second precision.
            file_age = time.time() - file_stat.st_mtime

            if file_age < min_age_secs:
                continue

            log.debug("Cleaning up old exported archive: %s", file_path)

            try:
                os.remove(file_path)
            except OSError as e:
                log.warning("Failed to remove old exported archive: %s", str(e))
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/extract.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/extract.py
new file mode 100644
index 0000000..7c0eab8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/extract.py
@@ -0,0 +1,166 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import io
+import os
+import shutil
+import tarfile
+import tempfile
+import tornado.httputil
+
+import rift.package.package
+import rift.package.convert
+import rift.package.image
+import rift.package.checksums
+
+from .convert_pkg import ConvertPackage
+
+
class ExtractError(Exception):
    """ Base class for errors raised while extracting an uploaded package. """
    pass


class UnreadableHeadersError(ExtractError):
    """ The multipart message headers could not be read. """
    pass


class MissingTerminalBoundary(ExtractError):
    """ The multipart body lacked its terminating boundary marker. """
    pass


class UnreadableDescriptorError(ExtractError):
    """ A descriptor file could not be parsed into a package. """
    pass


class UnreadablePackageError(ExtractError):
    """ An uploaded archive could not be read as a package. """
    pass
+
+
class PackageImage(object):
    """ A disk image bundled inside a descriptor package. """

    def __init__(self, log, image_name, image_hdl, checksum=None):
        """
        Arguments:
            log - a logger instance
            image_name - the image's name within the package
            image_hdl - an open file handle for the image contents
            checksum - the image's known checksum; computed from image_hdl
                       when not provided
        """
        self.name = image_name
        self.image_hdl = image_hdl

        if checksum is None:
            # The original format string had a %s placeholder without the
            # corresponding argument, which produced a logging error on emit.
            log.debug("Image %s checksum not provided, calculating checksum...", self.name)
            checksum = rift.package.checksums.checksum(self.image_hdl)
            log.debug("Image %s checksum: %s", self.name, checksum)

        self.checksum = checksum
+
+
class UploadPackageExtractor(object):
    """ Turns an uploaded file into one or more TemporaryPackage instances.

    The upload may be a tar archive, a bare descriptor file, or a foreign
    package format that ConvertPackage can rewrite into tar archives.
    """

    def __init__(self, log):
        """
        Arguments:
            log - A logger instance
        """
        self._log = log

    def create_packages_from_upload(self, uploaded_file, extracted_pkgfile):
        """ Create descriptor packages from an uploaded file

        Arguments:
            uploaded_file - the original name of the uploaded file, used for
                            type detection and diagnostics
            extracted_pkgfile - path to the uploaded content on disk

        Returns:
            A list of rift.package.package.TemporaryPackage instances.  Each
            owns an open file handle whose underlying file is removed when
            the package is closed.

        Raises:
            UnreadableDescriptorError - the upload looked like a descriptor
                                        file but could not be parsed
            UnreadablePackageError - the upload could not be interpreted as
                                     any supported package format
        """
        def create_package_from_descriptor_file(desc_hdl):
            # Uploaded package was a plain descriptor file
            bytes_hdl = io.BytesIO(desc_hdl.read())
            bytes_hdl.name = uploaded_file
            try:
                package = rift.package.package.DescriptorPackage.from_descriptor_file_hdl(
                        self._log, bytes_hdl
                        )
            except rift.package.package.PackageError as e:
                msg = "Could not create descriptor package from descriptor: %s" % str(e)
                self._log.error(msg)
                raise UnreadableDescriptorError(msg) from e

            return package

        def create_package_from_tar_file(tar_hdl):
            # Uploaded package was in a .tar.gz format
            tar_archive = rift.package.package.TarPackageArchive(
                    self._log, tar_hdl,
                    )
            try:
                package = tar_archive.create_package()
            except rift.package.package.PackageError as e:
                msg = "Could not create package from tar archive: %s" % str(e)
                self._log.error(msg)
                raise UnreadablePackageError(msg) from e

            return package

        self._log.info("creating package from uploaded descriptor file/package")
        tmp_pkgs = []
        upload_hdl = None
        try:
            # This file handle will be passed to TemporaryPackage to be closed
            # and the underlying file removed.
            upload_hdl = open(extracted_pkgfile, "r+b")

            # Process the package archive
            if tarfile.is_tarfile(extracted_pkgfile):
                package = create_package_from_tar_file(upload_hdl)
                tmp_pkgs.append(rift.package.package.TemporaryPackage(self._log,
                                                                      package,
                                                                      upload_hdl))

            # Check if this is just a descriptor file
            elif rift.package.convert.ProtoMessageSerializer.is_supported_file(uploaded_file):
                package = create_package_from_descriptor_file(upload_hdl)
                tmp_pkgs.append(rift.package.package.TemporaryPackage(self._log,
                                                                      package,
                                                                      upload_hdl))

            else:
                # See if the package can be converted
                files = ConvertPackage(self._log,
                                       uploaded_file,
                                       extracted_pkgfile).convert(delete=True)

                if not files:
                    # Not converted successfully
                    msg = "Uploaded file was neither a tar.gz or descriptor file"
                    self._log.error(msg)
                    raise UnreadablePackageError(msg)

                # Close the open file handle as this file is not used anymore
                upload_hdl.close()

                for f in files:
                    self._log.debug("Upload converted file: {}".format(f))
                    upload_hdl = open(f, "r+b")
                    package = create_package_from_tar_file(upload_hdl)
                    tmp_pkgs.append(rift.package.package.TemporaryPackage(self._log,
                                                                          package,
                                                                          upload_hdl))

        except Exception:
            # Cleanup any TemporaryPackage instances created
            for t in tmp_pkgs:
                t.close()

            # Close the handle if not already closed.  The inner handlers must
            # NOT reuse the name "e": Python 3 unbinds an "except ... as e"
            # variable when the clause exits, so the original trailing
            # "raise e" failed with UnboundLocalError (masking the real
            # error) whenever a cleanup handler had run.
            try:
                if upload_hdl is not None:
                    upload_hdl.close()
            except OSError as close_err:
                self._log.warning("Failed to close file handle: %s", str(close_err))

            try:
                self._log.debug("Removing extracted package file: %s", extracted_pkgfile)
                os.remove(extracted_pkgfile)
            except OSError as remove_err:
                self._log.warning("Failed to remove extracted package dir: %s", str(remove_err))

            # Bare raise preserves the original exception and its traceback.
            raise

        return tmp_pkgs
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py
new file mode 100644
index 0000000..ce30981
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py
@@ -0,0 +1,74 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import itertools
+import glanceclient
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+
+from rift.imagemgr import client
+
+
class ImageUploadError(Exception):
    """ Raised when an image could not be uploaded to the image catalog
    or distributed to cloud accounts.
    """
    pass
+
+
class ImageUploader(object):
    """ This class is responsible for uploading package images to cloud accounts """
    def __init__(self, log, loop, dts):
        """ Create an instance of ImageUploader

        Arguments:
            log - A logger
            loop - the asyncio event loop
            dts - a DTS handle used by the image-manager upload job client
        """
        self._log = log
        self._loop = loop
        self._dts = dts

        self._client = client.UploadJobClient(self._log, self._loop, self._dts)

    def upload_image(self, image_name, image_checksum, image_hdl):
        """ Upload a single image to the local glance catalog, unless an image
        with the same name AND checksum already exists there.

        Arguments:
            image_name - the image's catalog name
            image_checksum - the image's expected checksum
            image_hdl - an open file handle streaming the image data

        Raises:
            ImageUploadError - listing or uploading to the catalog failed
        """
        # NOTE(review): endpoint and token are hardcoded for a local glance
        # proxy -- confirm this is intentional and not meant to be configurable.
        endpoint = "http://127.0.0.1:9999"
        glance_client = glanceclient.Client('1', endpoint, token="asdf")

        try:
            for image in itertools.chain(
                    glance_client.images.list(is_public=False),
                    glance_client.images.list(is_public=True),
                    ):
                # Bug fix: the original compared image_checksum to itself
                # (always True), so any image with a matching name skipped
                # the upload even when its content differed.
                if image.name == image_name and image.checksum == image_checksum:
                    self._log.debug("Found existing image in catalog, not re-uploading")
                    return

            self._log.debug('Uploading image to catalog: {}'.format(image_name))

            image = glance_client.images.create(name=image_name, data=image_hdl, is_public="False",
                                                disk_format="qcow2", container_format="bare",
                                                checksum=image_checksum)
            self._log.debug('Image upload complete: %s', image)
        except Exception as e:
            raise ImageUploadError("Failed to upload image to catalog: %s" % str(e)) from e

    def upload_image_to_cloud_accounts(self, image_name, image_checksum, cloud_accounts=None):
        """ Start an upload job distributing the image to cloud accounts and
        block until it completes.

        Arguments:
            image_name - the image's catalog name
            image_checksum - the image's checksum
            cloud_accounts - optional restriction of target accounts (None = all)

        Raises:
            ImageUploadError - the upload job failed
        """
        self._log.debug("uploading image %s to all cloud accounts", image_name)
        upload_job = self._client.create_job_threadsafe(image_name, image_checksum, cloud_accounts)
        try:
            upload_job.wait_until_complete_threadsafe()
        except client.UploadJobError as e:
            # Bug fix: the original message contained the literal text
            # "(image_name)" instead of interpolating the actual image name.
            raise ImageUploadError("Failed to upload image %s to cloud accounts" % image_name) from e
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py
new file mode 100644
index 0000000..a1827eb
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py
@@ -0,0 +1,360 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+import time
+
+
class MessageException(Exception):
    """ Raised to abort processing while carrying a user-facing Message.

    Arguments:
        msg - a Message instance (NOT a Message subclass/class object)

    Raises:
        ValueError - msg is not a Message instance
    """
    def __init__(self, msg):
        if not isinstance(msg, Message):
            raise ValueError("{} is not a message".format(msg.__class__.__name__))

        # Forward to Exception so str(exc) shows the message instead of
        # being empty (the original never called the base initializer).
        super().__init__(msg)
        self.msg = msg
+
+
class Message(object):
    """
    Messages are events that describe stages of the onboarding process, and
    any event that may occur during the onboarding process.
    """

    def __init__(self, level, name, text):
        """
        Arguments:
            level - a logging level (e.g. logging.INFO)
            name - a stable, machine-readable message name
            text - the human-readable message text
        """
        self._level = level
        self._name = name
        self._text = text
        # Capture creation time so clients can order/age messages.
        self._timestamp = time.time()

    def __repr__(self):
        # Bug fix: logging._levelNames is Python 2 only; under Python 3 the
        # original raised AttributeError.  getLevelName() is the public,
        # version-stable way to map a level to its name.
        return "{} {}:{}:{}".format(
                self.timestamp,
                logging.getLevelName(self.level),
                self.name,
                self.text,
                )

    @property
    def level(self):
        # The logging level this message is reported at.
        return self._level

    @property
    def name(self):
        # Stable machine-readable identifier for this message type.
        return self._name

    @property
    def text(self):
        # Human-readable message text.
        return self._text

    @property
    def timestamp(self):
        # Creation time (seconds since the epoch).
        return self._timestamp

    def log(self, logger):
        """ Emit this message on the given logger at the message's level. """
        logger.log(self.level, self.text)
+
+
class WarningMessage(Message):
    """
    A warning is a message that does not prevent the onboarding process from
    continuing, but may not be the intention of the user when they initiated
    the process.
    """

    def __init__(self, name, text):
        super().__init__(logging.WARNING, name, text)


class ErrorMessage(Message):
    """
    An error message alerts the user to an event that prevents the
    continuation of the onboarding process.
    """

    def __init__(self, name, text):
        super().__init__(logging.ERROR, name, text)


class StatusMessage(Message):
    """
    A status message informs the user of an expected stage in the onboarding
    process.
    """

    def __init__(self, name, text):
        super().__init__(logging.INFO, name, text)


class FilenameMessage(Message):
    """
    A status message that carries the filename of a generated download
    artifact under the fixed message name "filename".
    """

    def __init__(self, filename):
        super().__init__(logging.INFO, 'filename', filename)
+
+
class Logger(object):
    """
    Wraps a python logger so that structured Message objects can be recorded
    alongside normal log output.  Each recorded message is appended to a
    shared list that the uploader application hands back to clients, while
    ordinary logging calls fall straight through to the wrapped logger.
    """

    def __init__(self, logger, messages):
        self._rift_logger = logger
        self._messages = messages

    @property
    def messages(self):
        """ The list of Message objects recorded so far. """
        return self._messages

    def message(self, msg):
        """ Emit msg on the wrapped logger, then record it for clients. """
        msg.log(self._rift_logger)
        self._messages.append(msg)

    def __getattr__(self, name):
        """ Delegate any other attribute to the wrapped rift logger.

        Handing back the underlying logger's own attribute means emitted log
        records carry the caller's real file/line rather than this wrapper's,
        which keeps debugging sane and avoids inadvertently triggering
        rwlogd's duplicate detection (which keys on event & line info).
        """
        return getattr(self._rift_logger, name)
+
+
+
# ---------------------------------------------------------------------------
# Onboarding messages.  Each class fixes a stable machine-readable message
# name ("onboard-...") paired with a human-readable description, for display
# in the uploader UI's per-transaction message stream.
# ---------------------------------------------------------------------------

class OnboardError(ErrorMessage):
    # Generic onboarding error with caller-supplied text.
    def __init__(self, msg):
        super().__init__("onboard-error", msg)


# NOTE(review): OnboardWarning derives from ErrorMessage (logging.ERROR), not
# WarningMessage -- confirm whether this is intentional or a copy/paste slip.
class OnboardWarning(ErrorMessage):
    def __init__(self, msg):
        super().__init__("onboard-warning", msg)


class OnboardDescriptorValidation(StatusMessage):
    def __init__(self):
        super().__init__("onboard-dsc-validation", "descriptor validation")


class OnboardDescriptorTimeout(OnboardError):
    def __init__(self):
        super().__init__("descriptor timeout")


class OnboardDescriptorError(OnboardError):
    def __init__(self, filename):
        super().__init__("unable to onboard {}".format(filename))


class OnboardDescriptorFormatError(OnboardError):
    def __init__(self, filename):
        super().__init__("{} has unrecognized format".format(filename))


class OnboardMissingContentType(OnboardError):
    def __init__(self):
        super().__init__("missing content-type header")


class OnboardUnsupportedMediaType(OnboardError):
    def __init__(self):
        super().__init__("multipart/form-data required")


class OnboardMissingContentBoundary(OnboardError):
    def __init__(self):
        super().__init__("missing content boundary")


class OnboardMissingTerminalBoundary(OnboardError):
    def __init__(self):
        super().__init__("Unable to find terminal content boundary")


class OnboardUnreadableHeaders(OnboardError):
    def __init__(self):
        super().__init__("Unable to read message headers")


class OnboardUnreadablePackage(OnboardError):
    def __init__(self):
        super().__init__("Unable to read package")


class OnboardExtractionError(OnboardError):
    def __init__(self):
        super().__init__("Unable to extract package contents")


class OnboardImageUploadError(OnboardError):
    def __init__(self, message=""):
        super().__init__("Unable to upload images: %s" % message)


class OnboardMissingChecksumsFile(OnboardError):
    def __init__(self):
        super().__init__("Package does not contain checksums.txt")


class OnboardChecksumMismatch(OnboardError):
    def __init__(self, filename):
        super().__init__("checksum mismatch for {}".format(filename))


class OnboardDescriptorExistsError(OnboardError):
    def __init__(self, descriptor_id):
        super().__init__("descriptor id {} already onboarded".format(descriptor_id))


# Status messages marking the expected stages of the onboarding process.

class OnboardStart(StatusMessage):
    def __init__(self):
        super().__init__("onboard-started", "onboarding process started")


class OnboardDescriptorOnboard(StatusMessage):
    def __init__(self):
        super().__init__("onboard-dsc-onboard", "onboarding descriptors")


class OnboardSuccess(StatusMessage):
    def __init__(self):
        super().__init__("onboard-success", "onboarding process successfully completed")


class OnboardFailure(StatusMessage):
    def __init__(self):
        super().__init__("onboard-failure", "onboarding process failed")


class OnboardPackageUpload(StatusMessage):
    def __init__(self):
        super().__init__("onboard-pkg-upload", "uploading package")


class OnboardImageUpload(StatusMessage):
    def __init__(self):
        super().__init__("onboard-img-upload", "uploading image")


class OnboardPackageValidation(StatusMessage):
    def __init__(self):
        super().__init__("onboard-pkg-validation", "package contents validation")
+
+
+
# ---------------------------------------------------------------------------
# Update messages.  Mirror of the onboarding messages above, with
# "update-..." message names, for the descriptor-update flow.
# ---------------------------------------------------------------------------

class UpdateError(ErrorMessage):
    # Generic update error with caller-supplied text.
    def __init__(self, msg):
        super().__init__("update-error", msg)


class UpdateMissingContentType(UpdateError):
    def __init__(self):
        super().__init__("missing content-type header")


class UpdateUnsupportedMediaType(UpdateError):
    def __init__(self):
        super().__init__("multipart/form-data required")


class UpdateMissingContentBoundary(UpdateError):
    def __init__(self):
        super().__init__("missing content boundary")


class UpdateDescriptorError(UpdateError):
    def __init__(self, filename):
        super().__init__("unable to update {}".format(filename))


class UpdatePackageNotFoundError(UpdateError):
    def __init__(self, descriptor_id):
        super().__init__("package {} not found".format(descriptor_id))


class UpdateDescriptorFormatError(UpdateError):
    def __init__(self, filename):
        super().__init__("{} has unrecognized format".format(filename))


class UpdateExtractionError(UpdateError):
    def __init__(self):
        super().__init__("Unable to extract package contents")


class UpdateDescriptorTimeout(UpdateError):
    def __init__(self):
        super().__init__("descriptor timeout")


class UpdateUnreadableHeaders(UpdateError):
    def __init__(self):
        super().__init__("Unable to read message headers")


class UpdateUnreadablePackage(UpdateError):
    def __init__(self):
        super().__init__("Unable to read package")


class UpdateChecksumMismatch(UpdateError):
    def __init__(self, filename):
        super().__init__("checksum mismatch for {}".format(filename))


class UpdateImageUploadError(UpdateError):
    def __init__(self):
        super().__init__("Unable to upload images")


# Status messages marking the expected stages of the update process.

class UpdateStart(StatusMessage):
    def __init__(self):
        super().__init__("update-started", "update process started")


class UpdateSuccess(StatusMessage):
    def __init__(self):
        super().__init__("update-success", "updating process successfully completed")


class UpdateFailure(StatusMessage):
    def __init__(self):
        super().__init__("update-failure", "updating process failed")


class UpdatePackageUpload(StatusMessage):
    def __init__(self):
        super().__init__("update-pkg-upload", "uploading package")


class UpdateDescriptorUpdate(StatusMessage):
    def __init__(self):
        super().__init__("update-dsc-onboard", "updating descriptors")


class UpdateDescriptorUpdated(StatusMessage):
    def __init__(self):
        super().__init__("update-dsc-updated", "updated descriptors")
+
+
+
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/onboard.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/onboard.py
new file mode 100644
index 0000000..f777c97
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/onboard.py
@@ -0,0 +1,164 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import requests
+
+from rift.package import convert
+from gi.repository import (
+    NsdYang,
+    RwNsdYang,
+    VnfdYang,
+    RwVnfdYang,
+)
+
+
+class OnboardError(Exception):
+    """Raised when onboarding (POST) a descriptor via restconf fails."""
+    pass
+
+
+class UpdateError(Exception):
+    """Raised when updating (PUT) a descriptor via restconf fails."""
+    pass
+
+
+class DescriptorOnboarder(object):
+    """ This class is responsible for onboarding descriptors using Restconf"""
+    DESC_ENDPOINT_MAP = {
+            NsdYang.YangData_Nsd_NsdCatalog_Nsd: "nsd-catalog/nsd",
+            RwNsdYang.YangData_Nsd_NsdCatalog_Nsd: "nsd-catalog/nsd",
+            VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
+            RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
+            }
+
+    DESC_SERIALIZER_MAP = {
+            NsdYang.YangData_Nsd_NsdCatalog_Nsd: convert.NsdSerializer(),
+            RwNsdYang.YangData_Nsd_NsdCatalog_Nsd: convert.RwNsdSerializer(),
+            VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: convert.VnfdSerializer(),
+            RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: convert.RwVnfdSerializer(),
+            }
+
+    HEADERS = {"content-type": "application/vnd.yang.data+json"}
+    TIMEOUT_SECS = 5
+    AUTH = ('admin', 'admin')
+
+    def __init__(self, log, host="127.0.0.1", port=8008, use_ssl=False, ssl_cert=None, ssl_key=None):
+        self._log = log
+        self._host = host
+        self.port = port
+        self._use_ssl = use_ssl
+        self._ssl_cert = ssl_cert
+        self._ssl_key = ssl_key
+
+        self.timeout = DescriptorOnboarder.TIMEOUT_SECS
+
+    @classmethod
+    def _get_headers(cls, auth):
+        headers = cls.HEADERS.copy()
+        if auth is not None:
+            headers['authorization'] = auth
+
+        return headers
+
+    def _get_url(self, descriptor_msg):
+        if type(descriptor_msg) not in DescriptorOnboarder.DESC_SERIALIZER_MAP:
+            raise TypeError("Invalid descriptor message type")
+
+        endpoint = DescriptorOnboarder.DESC_ENDPOINT_MAP[type(descriptor_msg)]
+
+        url = "{}://{}:{}/api/config/{}".format(
+                "https" if self._use_ssl else "http",
+                self._host,
+                self.port,
+                endpoint,
+                )
+
+        return url
+
+    def _make_request_args(self, descriptor_msg, auth=None):
+        if type(descriptor_msg) not in DescriptorOnboarder.DESC_SERIALIZER_MAP:
+            raise TypeError("Invalid descriptor message type")
+
+        serializer = DescriptorOnboarder.DESC_SERIALIZER_MAP[type(descriptor_msg)]
+        json_data = serializer.to_json_string(descriptor_msg)
+        url = self._get_url(descriptor_msg)
+
+        request_args = dict(
+            url=url,
+            data=json_data,
+            headers=self._get_headers(auth),
+            auth=DescriptorOnboarder.AUTH,
+            verify=False,
+            cert=(self._ssl_cert, self._ssl_key) if self._use_ssl else None,
+            timeout=self.timeout,
+        )
+
+        return request_args
+
+    def update(self, descriptor_msg, auth=None):
+        """ Update the descriptor config
+
+        Arguments:
+            descriptor_msg - A descriptor proto-gi msg
+            auth - the authorization header
+
+        Raises:
+            UpdateError - The descriptor config update failed
+        """
+        request_args = self._make_request_args(descriptor_msg, auth)
+        try:
+            response = requests.put(**request_args)
+            response.raise_for_status()
+        except requests.exceptions.ConnectionError as e:
+            msg = "Could not connect to restconf endpoint: %s" % str(e)
+            self._log.error(msg)
+            raise UpdateError(msg) from e
+        except requests.exceptions.HTTPError as e:
+            msg = "PUT request to %s error: %s" % (request_args["url"], response.text)
+            self._log.error(msg)
+            raise UpdateError(msg) from e
+        except requests.exceptions.Timeout as e:
+            msg = "Timed out connecting to restconf endpoint: %s", str(e)
+            self._log.error(msg)
+            raise UpdateError(msg) from e
+
+    def onboard(self, descriptor_msg, auth=None):
+        """ Onboard the descriptor config
+
+        Arguments:
+            descriptor_msg - A descriptor proto-gi msg
+            auth - the authorization header
+
+        Raises:
+            OnboardError - The descriptor config update failed
+        """
+
+        request_args = self._make_request_args(descriptor_msg, auth)
+        try:
+            response = requests.post(**request_args)
+            response.raise_for_status()
+        except requests.exceptions.ConnectionError as e:
+            msg = "Could not connect to restconf endpoint: %s" % str(e)
+            self._log.error(msg)
+            raise OnboardError(msg) from e
+        except requests.exceptions.HTTPError as e:
+            msg = "POST request to %s error: %s" % (request_args["url"], response.text)
+            self._log.error(msg)
+            raise OnboardError(msg) from e
+        except requests.exceptions.Timeout as e:
+            msg = "Timed out connecting to restconf endpoint: %s", str(e)
+            self._log.error(msg)
+            raise OnboardError(msg) from e
+
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/state.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/state.py
new file mode 100644
index 0000000..0028c12
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/state.py
@@ -0,0 +1,107 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import tornado.web
+
+from . import message
+
+
+class StateHandler(tornado.web.RequestHandler):
+    """Reports the status of a transaction as a JSON document.
+
+    Subclasses are expected to provide three class attributes (see the
+    self.__class__ lookups below): STARTED, SUCCESS and FAILURE, the
+    message classes used to classify a transaction's recorded messages.
+    """
+
+    def options(self, *args, **kargs):
+        # Answer CORS pre-flight requests; set_default_headers() supplies
+        # the Access-Control-* headers.
+        pass
+
+    def set_default_headers(self):
+        # Permissive CORS headers so a UI on another origin can poll status.
+        self.set_header('Access-Control-Allow-Origin', '*')
+        self.set_header('Access-Control-Allow-Headers',
+                        'Content-Type, Cache-Control, Accept, X-Requested-With, Authorization')
+        self.set_header('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE')
+
+    def initialize(self, log, loop):
+        # Called by tornado with the arguments given at handler registration.
+        self.log = log
+        self.loop = loop
+
+    def success(self, messages):
+        """Return True if any message is an instance of the SUCCESS class."""
+        success = self.__class__.SUCCESS
+        return any(isinstance(msg, success) for msg in messages)
+
+    def failure(self, messages):
+        """Return True if any message is an instance of the FAILURE class."""
+        failure = self.__class__.FAILURE
+        return any(isinstance(msg, failure) for msg in messages)
+
+    def started(self, messages):
+        """Return True if any message is an instance of the STARTED class."""
+        started = self.__class__.STARTED
+        return any(isinstance(msg, started) for msg in messages)
+
+    def status(self, messages):
+        """Return the overall status; failure takes precedence over success."""
+        if self.failure(messages):
+            return "failure"
+        elif self.success(messages):
+            return "success"
+        return "pending"
+
+    def notifications(self, messages):
+        """Bucket messages into errors/events/warnings (plus an optional
+        'filename' entry), ready for JSON encoding."""
+        notifications = {
+                "errors": list(),
+                "events": list(),
+                "warnings": list(),
+                }
+
+        for msg in messages:
+            # Order matters if the message classes are related by
+            # inheritance: StatusMessage is checked first.
+            if isinstance(msg, message.StatusMessage):
+                notifications["events"].append({
+                    'value': msg.name,
+                    'text': msg.text,
+                    'timestamp': msg.timestamp,
+                    })
+                continue
+
+            elif isinstance(msg, message.WarningMessage):
+                notifications["warnings"].append({
+                    'value': msg.text,
+                    'timestamp': msg.timestamp,
+                    })
+                continue
+
+            elif isinstance(msg, message.ErrorMessage):
+                notifications["errors"].append({
+                    'value': msg.text,
+                    'timestamp': msg.timestamp,
+                    })
+                continue
+
+            elif isinstance(msg, message.FilenameMessage):
+                # Only the last filename message is kept.
+                notifications["filename"] = msg.text
+                continue
+
+            self.log.warning('unrecognized message: {}'.format(msg))
+
+        return notifications
+
+    def get(self, transaction_id):
+        """GET handler: write the transaction's status document as JSON.
+
+        Responds 404 when the transaction id is unknown or has no
+        STARTED message yet.
+        """
+        if transaction_id not in self.application.messages:
+            raise tornado.web.HTTPError(404, "unrecognized transaction ID")
+
+        messages = self.application.messages[transaction_id]
+        messages.sort(key=lambda m: m.timestamp)
+
+        if not self.started(messages):
+            raise tornado.web.HTTPError(404, "unrecognized transaction ID")
+
+        notifications = self.notifications(messages)
+        notifications["status"] = self.status(messages)
+
+        self.write(tornado.escape.json_encode(notifications))
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py
new file mode 100644
index 0000000..ca09d33
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py
@@ -0,0 +1,457 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+
+import tornado
+import tornado.httputil
+import tornado.httpserver
+import tornado.platform.asyncio
+
+import tornadostreamform.multipart_streamer as multipart_streamer
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwLaunchpadYang as rwlaunchpad,
+    RwcalYang as rwcal,
+    RwTypes,
+)
+
+import rift.tasklets
+import rift.mano.cloud
+import rift.mano.config_agent
+from rift.package import store
+
+from . import uploader
+from . import datacenters
+
+# Byte-size constants used for the upload server limits below.
+MB = 1024 * 1024
+GB = 1024 * MB
+TB = 1024 * GB
+
+MAX_BUFFER_SIZE = 1 * MB  # Max. size loaded into memory!
+MAX_BODY_SIZE = 1 * MB  # Max. size loaded into memory!
+
+
+def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+    # Unforunately, it is currently difficult to figure out what has exactly
+    # changed in this xact without Pbdelta support (RIFT-4916)
+    # As a workaround, we can fetch the pre and post xact elements and
+    # perform a comparison to figure out adds/deletes/updates
+    xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+    curr_cfgs = list(dts_member_reg.elements)
+
+    xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+    curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+    # Find Adds
+    added_keys = set(xact_key_map) - set(curr_key_map)
+    added_cfgs = [xact_key_map[key] for key in added_keys]
+
+    # Find Deletes
+    deleted_keys = set(curr_key_map) - set(xact_key_map)
+    deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+    # Find Updates
+    updated_keys = set(curr_key_map) & set(xact_key_map)
+    updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+    return added_cfgs, deleted_cfgs, updated_cfgs
+
+
+class CatalogDtsHandler(object):
+    """Base class for catalog DTS subscribers.
+
+    Holds references to the owning tasklet and the uploader web app and
+    exposes the tasklet's log/dts handles as convenience properties.
+    """
+
+    def __init__(self, tasklet, app):
+        self.app = app
+        self.reg = None  # DTS registration handle, set by subclasses in register()
+        self.tasklet = tasklet
+
+    @property
+    def log(self):
+        # Alias for the owning tasklet's logger.
+        return self.tasklet.log
+
+    @property
+    def dts(self):
+        # Alias for the owning tasklet's DTS API handle.
+        return self.tasklet.dts
+
+
+class NsdCatalogDtsHandler(CatalogDtsHandler):
+    """Subscribes to NSD catalog config and mirrors it into the tasklet's
+    in-memory nsd_catalog dict (and deletes packages on removal)."""
+
+    XPATH = "C,/nsd:nsd-catalog/nsd:nsd"
+
+    def add_nsd(self, nsd):
+        """Add a newly configured NSD to the in-memory catalog."""
+        self.log.debug('nsd-catalog-handler:add:{}'.format(nsd.id))
+        if nsd.id not in self.tasklet.nsd_catalog:
+            self.tasklet.nsd_catalog[nsd.id] = nsd
+        else:
+            self.log.error("nsd already in catalog: {}".format(nsd.id))
+
+    def update_nsd(self, nsd):
+        """Replace an existing NSD in the in-memory catalog."""
+        self.log.debug('nsd-catalog-handler:update:{}'.format(nsd.id))
+        if nsd.id in self.tasklet.nsd_catalog:
+            self.tasklet.nsd_catalog[nsd.id] = nsd
+        else:
+            self.log.error("unrecognized NSD: {}".format(nsd.id))
+
+    def delete_nsd(self, nsd_id):
+        """Remove an NSD from the catalog and delete its stored package.
+
+        The package delete is attempted even if the id was not in the
+        in-memory catalog; a store failure is only logged.
+        """
+        self.log.debug('nsd-catalog-handler:delete:{}'.format(nsd_id))
+        if nsd_id in self.tasklet.nsd_catalog:
+            del self.tasklet.nsd_catalog[nsd_id]
+        else:
+            self.log.error("unrecognized NSD: {}".format(nsd_id))
+
+        try:
+            self.tasklet.nsd_package_store.delete_package(nsd_id)
+        except store.PackageStoreError as e:
+            self.log.warning("could not delete package from store: %s", str(e))
+
+    @asyncio.coroutine
+    def register(self):
+        """Register as a DTS subscriber for the NSD catalog config."""
+        def apply_config(dts, acg, xact, action, _):
+            # Applies a config transaction by diffing pre/post xact elements.
+            if xact.xact is None:
+                # When RIFT first comes up, an INSTALL is called with the current config
+                # Since confd doesn't actually persist data this never has any data so
+                # skip this for now.
+                self.log.debug("No xact handle.  Skipping apply config")
+                return
+
+            add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+                    dts_member_reg=self.reg,
+                    xact=xact,
+                    key_name="id",
+                    )
+
+            # Handle Deletes
+            for cfg in delete_cfgs:
+                self.delete_nsd(cfg.id)
+
+            # Handle Adds
+            for cfg in add_cfgs:
+                self.add_nsd(cfg)
+
+            # Handle Updates
+            for cfg in update_cfgs:
+                self.update_nsd(cfg)
+
+        self.log.debug("Registering for NSD catalog")
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_config,
+                        )
+
+        with self.dts.appconf_group_create(acg_handler) as acg:
+            self.reg = acg.register(
+                    xpath=NsdCatalogDtsHandler.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER,
+                    )
+
+
+class VnfdCatalogDtsHandler(CatalogDtsHandler):
+    """Subscribes to VNFD catalog config and mirrors it into the tasklet's
+    in-memory vnfd_catalog dict (and deletes packages on removal)."""
+
+    XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+
+    def add_vnfd(self, vnfd):
+        """Add a newly configured VNFD to the in-memory catalog."""
+        self.log.debug('vnfd-catalog-handler:add:{}'.format(vnfd.id))
+        if vnfd.id not in self.tasklet.vnfd_catalog:
+            self.tasklet.vnfd_catalog[vnfd.id] = vnfd
+
+        else:
+            self.log.error("VNFD already in catalog: {}".format(vnfd.id))
+
+    def update_vnfd(self, vnfd):
+        """Replace an existing VNFD in the in-memory catalog."""
+        self.log.debug('vnfd-catalog-handler:update:{}'.format(vnfd.id))
+        if vnfd.id in self.tasklet.vnfd_catalog:
+            self.tasklet.vnfd_catalog[vnfd.id] = vnfd
+
+        else:
+            self.log.error("unrecognized VNFD: {}".format(vnfd.id))
+
+    def delete_vnfd(self, vnfd_id):
+        """Remove a VNFD from the catalog and delete its stored package.
+
+        The package delete is attempted even if the id was not in the
+        in-memory catalog; a store failure is only logged.
+        """
+        self.log.debug('vnfd-catalog-handler:delete:{}'.format(vnfd_id))
+        if vnfd_id in self.tasklet.vnfd_catalog:
+            del self.tasklet.vnfd_catalog[vnfd_id]
+        else:
+            self.log.error("unrecognized VNFD: {}".format(vnfd_id))
+
+        try:
+            self.tasklet.vnfd_package_store.delete_package(vnfd_id)
+        except store.PackageStoreError as e:
+            self.log.warning("could not delete package from store: %s", str(e))
+
+    @asyncio.coroutine
+    def register(self):
+        """Register as a DTS subscriber for the VNFD catalog config."""
+        def apply_config(dts, acg, xact, action, _):
+            # Applies a config transaction by diffing pre/post xact elements.
+            if xact.xact is None:
+                # When RIFT first comes up, an INSTALL is called with the current config
+                # Since confd doesn't actually persist data this never has any data so
+                # skip this for now.
+                self.log.debug("No xact handle.  Skipping apply config")
+                return
+
+            add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+                    dts_member_reg=self.reg,
+                    xact=xact,
+                    key_name="id",
+                    )
+
+            # Handle Deletes
+            for cfg in delete_cfgs:
+                self.delete_vnfd(cfg.id)
+
+            # Handle Adds
+            for cfg in add_cfgs:
+                self.add_vnfd(cfg)
+
+            # Handle Updates
+            for cfg in update_cfgs:
+                self.update_vnfd(cfg)
+
+        self.log.debug("Registering for VNFD catalog")
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_config,
+                        )
+
+        with self.dts.appconf_group_create(acg_handler) as acg:
+            self.reg = acg.register(
+                    xpath=VnfdCatalogDtsHandler.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER,
+                    )
+
+class CfgAgentAccountHandlers(object):
+    """Wires up config-agent account config subscription and operdata."""
+
+    def __init__(self, dts, log, log_hdl, loop):
+        self._dts = dts
+        self._log = log
+        self._log_hdl = log_hdl
+        self._loop = loop
+
+        self._log.debug("creating config agent account config handler")
+        self.cfg_agent_cfg_handler = rift.mano.config_agent.ConfigAgentSubscriber(
+            self._dts, self._log,
+            rift.mano.config_agent.ConfigAgentCallbacks(
+                on_add_apply=self.on_cfg_agent_account_added,
+                on_delete_apply=self.on_cfg_agent_account_deleted,
+            )
+        )
+
+        self._log.debug("creating config agent account opdata handler")
+        self.cfg_agent_operdata_handler = rift.mano.config_agent.CfgAgentDtsOperdataHandler(
+            self._dts, self._log, self._loop,
+        )
+
+    def on_cfg_agent_account_deleted(self, account):
+        """Config callback: propagate an account delete to the operdata handler."""
+        self._log.debug("config agent account deleted")
+        self.cfg_agent_operdata_handler.delete_cfg_agent_account(account.name)
+
+    def on_cfg_agent_account_added(self, account):
+        """Config callback: propagate an account add to the operdata handler."""
+        self._log.debug("config agent account added")
+        self.cfg_agent_operdata_handler.add_cfg_agent_account(account)
+
+    @asyncio.coroutine
+    def register(self):
+        """Register both the config subscriber and the operdata handler."""
+        self.cfg_agent_cfg_handler.register()
+        yield from self.cfg_agent_operdata_handler.register()
+
+class CloudAccountHandlers(object):
+    """Wires up cloud account config subscription and operdata, keeping the
+    uploader app's account list in sync with the configured accounts."""
+
+    def __init__(self, dts, log, log_hdl, loop, app):
+        self._log = log
+        self._log_hdl = log_hdl
+        self._dts = dts
+        self._loop = loop
+        self._app = app
+
+        self._log.debug("creating cloud account config handler")
+        self.cloud_cfg_handler = rift.mano.cloud.CloudAccountConfigSubscriber(
+            self._dts, self._log, self._log_hdl,
+            rift.mano.cloud.CloudAccountConfigCallbacks(
+                on_add_apply=self.on_cloud_account_added,
+                on_delete_apply=self.on_cloud_account_deleted,
+            )
+        )
+
+        self._log.debug("creating cloud account opdata handler")
+        self.cloud_operdata_handler = rift.mano.cloud.CloudAccountDtsOperdataHandler(
+            self._dts, self._log, self._loop,
+        )
+
+    def on_cloud_account_deleted(self, account_name):
+        """Config callback: rebuild the app's account list and drop operdata."""
+        self._log.debug("cloud account deleted")
+        # Rebuild in place so other holders of the list see the change.
+        self._app.accounts.clear()
+        self._app.accounts.extend(list(self.cloud_cfg_handler.accounts.values()))
+        self.cloud_operdata_handler.delete_cloud_account(account_name)
+
+    def on_cloud_account_added(self, account):
+        """Config callback: rebuild the app's account list and add operdata."""
+        self._log.debug("cloud account added")
+        # Rebuild in place so other holders of the list see the change.
+        self._app.accounts.clear()
+        self._app.accounts.extend(list(self.cloud_cfg_handler.accounts.values()))
+        self._log.debug("accounts: %s", self._app.accounts)
+        self.cloud_operdata_handler.add_cloud_account(account)
+
+    @asyncio.coroutine
+    def register(self):
+        """Register both the config subscriber and the operdata handler."""
+        self.cloud_cfg_handler.register()
+        yield from self.cloud_operdata_handler.register()
+
+
+class LaunchpadTasklet(rift.tasklets.Tasklet):
+    """Main launchpad tasklet: runs the upload HTTP server and the DTS
+    handlers for the NSD/VNFD catalogs, cloud accounts, config agents and
+    datacenters."""
+
+    UPLOAD_MAX_BODY_SIZE = MAX_BODY_SIZE
+    UPLOAD_MAX_BUFFER_SIZE = MAX_BUFFER_SIZE
+    UPLOAD_PORT = "4567"
+
+    def __init__(self, *args, **kwargs):
+        super(LaunchpadTasklet, self).__init__(*args, **kwargs)
+        self.rwlog.set_category("rw-mano-log")
+        self.rwlog.set_subcategory("launchpad")
+
+        # Created during init()/start(); None until then.
+        self.app = None
+        self.server = None
+
+        self.account_handler = None
+        self.config_handler = None
+        self.nsd_catalog_handler = None
+        self.vld_catalog_handler = None
+        self.vnfd_catalog_handler = None
+        self.cloud_handler = None
+        self.datacenter_handler = None
+        self.lp_config_handler = None
+
+        self.vnfd_package_store = store.VnfdPackageFilesystemStore(self.log)
+        self.nsd_package_store = store.NsdPackageFilesystemStore(self.log)
+
+        # In-memory catalogs, kept in sync by the catalog DTS handlers.
+        self.nsd_catalog = dict()
+        self.vld_catalog = dict()
+        self.vnfd_catalog = dict()
+
+    @property
+    def cloud_accounts(self):
+        """Currently configured cloud accounts (empty before init())."""
+        if self.cloud_handler is None:
+            return list()
+
+        return list(self.cloud_handler.cloud_cfg_handler.accounts.values())
+
+    def start(self):
+        """Tasklet start hook: create the DTS API handle."""
+        super(LaunchpadTasklet, self).start()
+        self.log.info("Starting LaunchpadTasklet")
+
+        self.log.debug("Registering with dts")
+        self.dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                rwlaunchpad.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+    def stop(self):
+        """Tasklet stop hook: stop the HTTP server and tear down DTS."""
+        try:
+            self.server.stop()
+            self.dts.deinit()
+        except Exception:
+            self.log.exception("Caught Exception in LP stop")
+            raise
+
+    @asyncio.coroutine
+    def init(self):
+        """DTS INIT-state hook: build the upload server and register all
+        DTS handlers."""
+        io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+        self.app = uploader.UploaderApplication(self)
+
+        manifest = self.tasklet_info.get_pb_manifest()
+        ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
+        ssl_key = manifest.bootstrap_phase.rwsecurity.key
+        ssl_options = {
+                "certfile": ssl_cert,
+                "keyfile": ssl_key,
+                }
+
+        # Serve uploads over https only when the manifest enables SSL.
+        if manifest.bootstrap_phase.rwsecurity.use_ssl:
+            self.server = tornado.httpserver.HTTPServer(
+                self.app,
+                max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
+                io_loop=io_loop,
+                ssl_options=ssl_options,
+            )
+
+        else:
+            self.server = tornado.httpserver.HTTPServer(
+                self.app,
+                max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
+                io_loop=io_loop,
+            )
+
+        self.log.debug("creating NSD catalog handler")
+        self.nsd_catalog_handler = NsdCatalogDtsHandler(self, self.app)
+        yield from self.nsd_catalog_handler.register()
+
+        self.log.debug("creating VNFD catalog handler")
+        self.vnfd_catalog_handler = VnfdCatalogDtsHandler(self, self.app)
+        yield from self.vnfd_catalog_handler.register()
+
+        self.log.debug("creating datacenter handler")
+        self.datacenter_handler = datacenters.DataCenterPublisher(self)
+        yield from self.datacenter_handler.register()
+
+        self.log.debug("creating cloud account handler")
+        self.cloud_handler = CloudAccountHandlers(
+                self.dts, self.log, self.log_hdl, self.loop, self.app
+                )
+        yield from self.cloud_handler.register()
+
+        self.log.debug("creating config agent handler")
+        self.config_handler = CfgAgentAccountHandlers(self.dts, self.log, self.log_hdl, self.loop)
+        yield from self.config_handler.register()
+
+    @asyncio.coroutine
+    def run(self):
+        """DTS RUN-state hook: start listening for uploads."""
+        self.server.listen(LaunchpadTasklet.UPLOAD_PORT)
+
+    def on_instance_started(self):
+        self.log.debug("Got instance started callback")
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Handle DTS state change
+
+        Take action according to current DTS state to transition application
+        into the corresponding application state
+
+        Arguments
+            state - current dts state
+
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self.dts.handle.set_state(next_state)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tosca.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tosca.py
new file mode 100644
index 0000000..8ccc899
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tosca.py
@@ -0,0 +1,240 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging
+import os
+import shutil
+import subprocess
+import tempfile
+import uuid
+import zipfile
+
+from rift.mano.tosca_translator.shell import TranslatorShell
+from rift.mano.yang_translator.rwmano.yang_translator import YangTranslator
+
+
+class ToscaPackageError(Exception):
+    """Base class for errors while handling a TOSCA package."""
+    pass
+
+
+class ToscaPackageReadError(Exception):
+    """Raised when a TOSCA package cannot be read."""
+    pass
+
+
+class InvalidToscaPackageError(ToscaPackageError):
+    """Raised when the input is not a valid TOSCA package (not a zip)."""
+    pass
+
+
+class ToscaTranslateError(ToscaPackageError):
+    """Raised when translating a TOSCA package to descriptors fails."""
+    pass
+
+
+class YangTranslateError(Exception):
+    """Base class for errors while translating YANG models to TOSCA."""
+    pass
+
+
+class ToscaArchiveCreateError(YangTranslateError):
+    """Raised when a TOSCA archive cannot be created."""
+    pass
+
+
+class YangTranslateNsdError(YangTranslateError):
+    """Raised when an NSD fails to translate to TOSCA."""
+    pass
+
+
+class ExportTosca(object):
+    def __init__(self, log=None):
+        if log is None:
+            self.log = logging.getLogger("rw-mano-log")
+        else:
+            self.log = log
+        self.nsds = {}
+        self.csars = list()
+
+    def add_image(self, nsd_id, image, chksum=None):
+        if image.name not in self.images:
+            self.images[image.name] = image
+
+    def add_vld(self, nsd_id, vld, pkg=None):
+        if not 'vlds' in self.nsds[nsd_id]:
+            self.nsds[nsd_id]['vlds'] = []
+        self.nsds[nsd_id]['vlds'].append(vld)
+        if pkg:
+            self.nsds[nsd_id]['pkgs'].append(pkg)
+
+    def add_vnfd(self, nsd_id, vnfd, pkg=None):
+        if not 'vnfds' in self.nsds[nsd_id]:
+            self.nsds[nsd_id]['vnfds'] = []
+        self.nsds[nsd_id]['vnfds'].append(vnfd)
+        if pkg:
+            self.nsds[nsd_id]['pkgs'].append(pkg)
+
+    def add_nsd(self, nsd, pkg=None):
+        nsd_id = str(uuid.uuid4())
+        self.nsds[nsd_id] = {'nsd': nsd}
+        self.nsds[nsd_id]['pkgs'] = []
+        if pkg:
+            self.nsds[nsd_id]['pkgs'].append(pkg)
+        return nsd_id
+
+    def create_csar(self, nsd_id, dest=None):
+        if dest is None:
+            dest = tempfile.mkdtemp()
+
+        # Convert YANG to dict
+        yangs = {}
+        yangs['vnfd'] = []
+        for vnfd in self.nsds[nsd_id]['vnfds']:
+            yangs['vnfd'].append(vnfd.as_dict())
+            self.log.debug("Translate VNFD: {}".format(vnfd.as_dict()))
+        yangs['nsd'] = []
+        yangs['nsd'].append(self.nsds[nsd_id]['nsd'].as_dict())
+        self.log.debug("Translate NSD : {}".format(yangs['nsd']))
+
+        # Translate YANG model to TOSCA template
+        translator = YangTranslator(self.log,
+                                    yangs=yangs,
+                                    packages=self.nsds[nsd_id]['pkgs'])
+        output = translator.translate()
+        self.csars.extend(translator.write_output(output,
+                                                  output_dir=dest,
+                                                  archive=True))
+        self.log.debug("Created CSAR archive {}".format(self.csars[-1]))
+
+    def create_archive(self, archive_name, dest=None):
+        if not len(self.nsds):
+            self.log.error("Did not find any NSDs to export")
+            return
+
+        if dest is None:
+            dest = tempfile.mkdtemp()
+
+        prevdir = os.getcwd()
+
+        if not os.path.exists(dest):
+            os.makedirs(dest)
+
+        try:
+            # Convert each NSD to a TOSCA template
+            for nsd_id in self.nsds:
+                # Not passing the dest dir to prevent clash in case
+                # multiple export of the same desc happens
+                self.create_csar(nsd_id)
+
+        except Exception as e:
+            msg = "Exception converting NSD {}: {}".format(nsd_id, e)
+            self.log.exception(e)
+            raise YangTranslateNsdError(msg)
+
+        os.chdir(dest)
+
+        try:
+            if archive_name.endswith(".zip"):
+                archive_name = archive_name[:-4]
+
+            archive_path = os.path.join(dest, archive_name)
+
+            # Construct a zip of the csar archives
+            zip_name = '{}.zip'.format(archive_path)
+
+            if len(self.csars) == 1:
+                # Only 1 TOSCA template, just rename csar if required
+                if self.csars[0] != zip_name:
+                    mv_cmd = "mv {} {}".format(self.csars[0], zip_name)
+                    subprocess.check_call(mv_cmd, shell=True, stdout=subprocess.DEVNULL)
+                    # Remove the temporary directory created
+                    shutil.rmtree(os.path.dirname(self.csars[0]))
+
+            else:
+                with zipfile.ZipFile('{}.partial'.format(zip_name), 'w') as zf:
+                    for csar in self.csars:
+                        # Move file to the current dest dir
+                        if os.path.dirname(csar) != dest:
+                            file_mv = "mv {} {}".format(csar, dest)
+                            subprocess.check_call(file_mv,
+                                                  shell=True,
+                                                  stdout=subprocess.DEVNULL)
+                            # Remove the temporary directory created
+                            shutil.rmtree(os.path.dirname(csar))
+
+                        csar_f = os.basename(csar)
+                        # Now add to the archive
+                        zf.write(csar_f)
+                        # Remove the csar file
+                        os.remove(csar_f)
+
+                    # Rename archive to final name
+                    mv_cmd = "mv {0}.partial {0}".format(zip_name)
+                    subprocess.check_call(mv_cmd, shell=True, stdout=subprocess.DEVNULL)
+
+            return zip_name
+
+        except Exception as e:
+            msg = "Creating CSAR archive failed: {0}".format(e)
+            self.log.exception(e)
+            raise YangTranslateError(msg)
+
+        finally:
+            os.chdir(prevdir)
+
+class ImportTosca(object):
+
+    def __init__(self, log, in_file, out_dir=None):
+        if log is None:
+            self.log = logging.getLogger("rw-mano-log")
+        else:
+            self.log = log
+        self.log = log
+        self.in_file = in_file
+        self.out_dir = out_dir
+
+    def translate(self):
+        # Check if the input file is a zip file
+        if not zipfile.is_zipfile(self.in_file):
+            err_msg = "{} is not a zip file.".format(self.in_file)
+            self.log.error(err_msg)
+            raise InvalidToscaPackageError(err_msg)
+
+        try:
+            # Store the current working directory
+            prevdir = os.getcwd()
+
+            # See if we need to create a output directory
+            out_dir = self.out_dir
+            if out_dir is None:
+                out_dir = tempfile.mkdtemp()
+
+            # Call the TOSCA translator
+            self.log.debug("Calling tosca-translator for {}".
+                           format(self.in_file))
+            return TranslatorShell(self.log).translate(self.in_file,
+                                                       out_dir,
+                                                       archive=True)
+
+        except Exception as e:
+            self.log.exception(e)
+            raise ToscaTranslateError("Error translating TOSCA package {}: {}".
+                                      format(self.in_file, e))
+
+        finally:
+                os.chdir(prevdir)
+
+    @staticmethod
+    def is_tosca_package(in_file):
+        if zipfile.is_zipfile(in_file):
+            return True
+        else:
+            return False
+
+
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py
new file mode 100644
index 0000000..081c1f5
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py
@@ -0,0 +1,881 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import os
+import threading
+import uuid
+import zlib
+
+import tornado
+import tornado.escape
+import tornado.ioloop
+import tornado.web
+import tornado.httputil
+import tornadostreamform.multipart_streamer as multipart_streamer
+
+import requests
+
+# disable unsigned certificate warning
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
+import gi
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('NsdYang', '1.0')
+gi.require_version('VnfdYang', '1.0')
+
+from gi.repository import (
+        NsdYang,
+        VnfdYang,
+        )
+import rift.mano.cloud
+
+import rift.package.charm
+import rift.package.checksums
+import rift.package.config
+import rift.package.convert
+import rift.package.icon
+import rift.package.package
+import rift.package.script
+import rift.package.store
+
+from . import (
+        export,
+        extract,
+        image,
+        message,
+        onboard,
+        state,
+        )
+
+from .message import (
+        MessageException,
+
+        # Onboard Error Messages
+        OnboardChecksumMismatch,
+        OnboardDescriptorError,
+        OnboardDescriptorExistsError,
+        OnboardDescriptorFormatError,
+        OnboardError,
+        OnboardExtractionError,
+        OnboardImageUploadError,
+        OnboardMissingContentBoundary,
+        OnboardMissingContentType,
+        OnboardMissingTerminalBoundary,
+        OnboardUnreadableHeaders,
+        OnboardUnreadablePackage,
+        OnboardUnsupportedMediaType,
+
+        # Onboard Status Messages
+        OnboardDescriptorOnboard,
+        OnboardFailure,
+        OnboardImageUpload,
+        OnboardPackageUpload,
+        OnboardPackageValidation,
+        OnboardStart,
+        OnboardSuccess,
+
+
+        # Update Error Messages
+        UpdateChecksumMismatch,
+        UpdateDescriptorError,
+        UpdateDescriptorFormatError,
+        UpdateError,
+        UpdateExtractionError,
+        UpdateImageUploadError,
+        UpdateMissingContentBoundary,
+        UpdateMissingContentType,
+        UpdatePackageNotFoundError,
+        UpdateUnreadableHeaders,
+        UpdateUnreadablePackage,
+        UpdateUnsupportedMediaType,
+
+        # Update Status Messages
+        UpdateDescriptorUpdate,
+        UpdateDescriptorUpdated,
+        UpdatePackageUpload,
+        UpdateStart,
+        UpdateSuccess,
+        UpdateFailure,
+        )
+
+from .tosca import ExportTosca
+
# Size constants used to cap the streamed upload body.
MB = 1 << 20  # bytes per mebibyte (2**20)
GB = 1 << 30  # bytes per gibibyte (2**30)

# Largest request body the streaming upload endpoints will accept.
MAX_STREAMED_SIZE = 5 * GB
+
+
class HttpMessageError(Exception):
    """Error carrying an HTTP status code plus a user-facing message object.

    Attributes:
        code - HTTP status code to report to the client (e.g. 400, 415)
        msg  - message object (or string) describing the failure; callers
               read e.msg.name when converting to a tornado HTTPError
    """

    def __init__(self, code, msg):
        # Fix: inform the Exception base class of the message so that
        # str(exc) and tracebacks show it (originally they were empty).
        super().__init__(msg)
        self.code = code
        self.msg = msg
+
+
class GzipTemporaryFileStreamedPart(multipart_streamer.TemporaryFileStreamedPart):
    """Streamed multipart part that gunzips incoming chunks on the fly,
    so only decompressed bytes are written to the backing temporary file."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Create a decompressor for gzip data to decompress on the fly during upload
        # 16 + MAX_WBITS tells zlib to expect a gzip header/trailer.
        # http://stackoverflow.com/questions/2423866/python-decompressing-gzip-chunk-by-chunk
        self._decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def feed(self, data):
        # Decompress the chunk; a chunk may yield no output yet (data still
        # buffered inside the decompressor), in which case feed nothing.
        decompressed_data = self._decompressor.decompress(data)
        if decompressed_data:
            super().feed(decompressed_data)

    def finalize(self):
        # All data has arrived, flush the decompressor to get any last decompressed data
        decompressed_data = self._decompressor.flush()
        super().feed(decompressed_data)
        super().finalize()
+
+
class GzipMultiPartStreamer(multipart_streamer.MultiPartStreamer):
    """ This Multipart Streamer decompresses gzip files on the fly during multipart upload """

    @staticmethod
    def _get_descriptor_name_from_headers(headers):
        """Return the filename of the "descriptor" form-data part from the
        parsed part headers, or None if no such part/filename is present.

        headers is a list of dicts with "value" and "params" keys as produced
        by the multipart streamer's header parsing.
        """
        descriptor_filename = None

        for entry in headers:
            # Only Content-Disposition: form-data entries carry the name/filename
            if entry["value"] != "form-data":
                continue

            form_data_params = entry["params"]
            if "name" in form_data_params:
                if form_data_params["name"] != "descriptor":
                    continue

                if "filename" not in form_data_params:
                    continue

                descriptor_filename = form_data_params["filename"]

        return descriptor_filename

    def create_part(self, headers):
        """ Create the StreamedPart subclass depending on the descriptor filename

        For gzipped descriptor packages, create a GzipTemporaryFileStreamedPart which
        can decompress the gzip while it's being streamed into the launchpad directly
        into a file.

        Returns:
            A TemporaryFileStreamedPart for non-gzip parts, or a
            GzipTemporaryFileStreamedPart for ".gz" descriptor uploads.
        """
        filename = GzipMultiPartStreamer._get_descriptor_name_from_headers(headers)
        if filename is None or not filename.endswith(".gz"):
            return multipart_streamer.TemporaryFileStreamedPart(self, headers)

        return GzipTemporaryFileStreamedPart(self, headers)
+
+
class RequestHandler(tornado.web.RequestHandler):
    """Base handler that answers CORS preflight requests permissively."""

    def options(self, *args, **kargs):
        # Preflight responses need nothing beyond the CORS headers that
        # set_default_headers attaches to every response.
        pass

    def set_default_headers(self):
        """Attach permissive CORS headers to every outgoing response."""
        cors_headers = (
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers',
             'Content-Type, Cache-Control, Accept, X-Requested-With, Authorization'),
            ('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE'),
        )
        for name, value in cors_headers:
            self.set_header(name, value)
+
+
@tornado.web.stream_request_body
class StreamingUploadHandler(RequestHandler):
    """Abstract streaming handler for multipart descriptor-package uploads.

    Subclasses supply onboard- or update-specific message objects through
    the msg_* hooks so the same streaming flow serves both endpoints.
    """

    def initialize(self, log, loop):
        """Initialize the handler

        Arguments:
            log  - the logger that this handler should use
            loop - the tasklets ioloop

        """
        self.transaction_id = str(uuid.uuid4())

        self.loop = loop
        # NOTE(review): the 'log' argument is ignored in favor of a
        # transaction-scoped logger from the application -- confirm intended.
        self.log = self.application.get_logger(self.transaction_id)

        self.part_streamer = None

        self.log.debug('created handler (transaction_id = {})'.format(self.transaction_id))

    # --- message hooks implemented by subclasses -------------------------

    def msg_missing_content_type(self):
        raise NotImplementedError()

    def msg_unsupported_media_type(self):
        raise NotImplementedError()

    def msg_missing_content_boundary(self):
        raise NotImplementedError()

    def msg_start(self):
        raise NotImplementedError()

    def msg_success(self):
        raise NotImplementedError()

    def msg_failure(self):
        raise NotImplementedError()

    def msg_package_upload(self):
        raise NotImplementedError()

    @tornado.gen.coroutine
    def prepare(self):
        """Prepare the handler for a request

        The prepare function is the first part of a request transaction:
        it validates the multipart headers and creates the part streamer
        that incoming body chunks will be fed into.

        """
        if self.request.method != "POST":
            return

        self.request.connection.set_max_body_size(MAX_STREAMED_SIZE)

        self.log.message(self.msg_start())

        try:
            # Retrieve the content type and parameters from the request
            content_type = self.request.headers.get('content-type', None)
            if content_type is None:
                raise HttpMessageError(400, self.msg_missing_content_type())

            # NOTE: _parse_header is a private tornado API; revisit if
            # tornado is upgraded.
            content_type, params = tornado.httputil._parse_header(content_type)

            if "multipart/form-data" != content_type.lower():
                raise HttpMessageError(415, self.msg_unsupported_media_type())

            if "boundary" not in params:
                raise HttpMessageError(400, self.msg_missing_content_boundary())

            # You can get the total request size from the headers.
            # Bug fix: headers.get() with a default never raises KeyError;
            # a malformed Content-Length raises ValueError from int().
            try:
                total = int(self.request.headers.get("Content-Length", "0"))
            except ValueError:
                self.log.warning("Content length header unreadable")
                # For any well formed browser request, Content-Length should have a value.
                total = 0

            # And here you create a streamer that will accept incoming data
            self.part_streamer = GzipMultiPartStreamer(total)

        except HttpMessageError as e:
            self.log.message(e.msg)
            self.log.message(self.msg_failure())

            raise tornado.web.HTTPError(e.code, e.msg.name)

        except Exception as e:
            # Log and fall through; part_streamer may still be None in
            # data_received if this path is hit.
            self.log.exception(e)
            self.log.message(self.msg_failure())

    @tornado.gen.coroutine
    def data_received(self, chunk):
        """Forward a received chunk of body data to the multipart streamer.

        Arguments:
            chunk - a chunk of raw request-body bytes

        """
        self.part_streamer.data_received(chunk)

    def post(self):
        """Handle a post request

        The function is called after any data associated with the body of the
        request has been received.

        Raises:
            HttpMessageError - the "descriptor" form part is absent/duplicated

        """
        # You MUST call this to close the incoming stream.
        self.part_streamer.data_complete()

        desc_parts = self.part_streamer.get_parts_by_name("descriptor")
        if len(desc_parts) != 1:
            raise HttpMessageError(400, OnboardError("Descriptor option not found"))

        self.log.message(self.msg_package_upload())
+
+
class UploadHandler(StreamingUploadHandler):
    """
    This handler is used to upload archives that contain VNFDs, NSDs, and PNFDs
    to the launchpad. This is a streaming handler that writes uploaded archives
    to disk without loading them all into memory.
    """

    # Onboard-specific message objects for the shared streaming flow.

    def msg_missing_content_type(self):
        return OnboardMissingContentType()

    def msg_unsupported_media_type(self):
        return OnboardUnsupportedMediaType()

    def msg_missing_content_boundary(self):
        return OnboardMissingContentBoundary()

    def msg_start(self):
        return OnboardStart()

    def msg_success(self):
        return OnboardSuccess()

    def msg_failure(self):
        return OnboardFailure()

    def msg_package_upload(self):
        return OnboardPackageUpload()

    def post(self):
        """Handle a post request

        Called once the whole body has been streamed in; kicks off the
        asynchronous onboard and returns the transaction id to the client.

        """
        try:
            super().post()

            auth_header = self.request.headers.get('authorization', None)
            self.application.onboard(
                    self.part_streamer,
                    self.transaction_id,
                    auth=auth_header,
                    )

            self.set_status(200)
            response = {"transaction_id": self.transaction_id}
            self.write(tornado.escape.json_encode(response))

        except Exception:
            self.log.exception("Upload POST failed")
            self.part_streamer.release_parts()
            raise
+
+
class UpdateHandler(StreamingUploadHandler):
    """
    Streaming handler used to upload a replacement archive for a descriptor
    package that has already been onboarded to the launchpad.
    """

    # Update-specific message objects for the shared streaming flow.

    def msg_missing_content_type(self):
        return UpdateMissingContentType()

    def msg_unsupported_media_type(self):
        return UpdateUnsupportedMediaType()

    def msg_missing_content_boundary(self):
        return UpdateMissingContentBoundary()

    def msg_start(self):
        return UpdateStart()

    def msg_success(self):
        return UpdateSuccess()

    def msg_failure(self):
        return UpdateFailure()

    def msg_package_upload(self):
        return UpdatePackageUpload()

    def post(self):
        """Handle a post request

        The function is called after any data associated with the body of the
        request has been received.

        """
        try:
            super().post()

            # Kick off the asynchronous update; results are reported through
            # the transaction's message log.
            self.application.update(
                    self.part_streamer,
                    self.transaction_id,
                    auth=self.request.headers.get('authorization', None),
                    )

            self.set_status(200)
            self.write(tornado.escape.json_encode({
                "transaction_id": self.transaction_id,
                    }))
        except Exception:
            self.log.exception("Upload POST failed")
            self.part_streamer.release_parts()
            raise
+
+
class UploadStateHandler(state.StateHandler):
    """State endpoint reporting the progress of an onboard transaction."""
    STARTED = OnboardStart
    SUCCESS = OnboardSuccess
    FAILURE = OnboardFailure
+
+
class UpdateStateHandler(state.StateHandler):
    """State endpoint reporting the progress of an update transaction."""
    STARTED = UpdateStart
    SUCCESS = UpdateSuccess
    FAILURE = UpdateFailure
+
+
class UpdatePackage(threading.Thread):
    """Background thread that applies an uploaded archive as an update to
    an already-onboarded descriptor package: validate, store, extract
    artifacts, update the descriptor, then upload images.
    """

    def __init__(self, log, loop, part_streamer, auth,
                 onboarder, uploader, package_store_map):
        super().__init__()
        self.log = log
        self.loop = loop
        self.part_streamer = part_streamer
        self.auth = auth
        self.onboarder = onboarder
        self.uploader = uploader
        self.package_store_map = package_store_map

        self.io_loop = tornado.ioloop.IOLoop.current()

    def _update_package(self):
        # Extract package could return multiple packages if
        # the package is converted
        for pkg in self.extract_package():
            with pkg as temp_package:
                package_checksums = self.validate_package(temp_package)
                stored_package = self.update_package(temp_package)

                try:
                    self.extract_charms(temp_package)
                    self.extract_scripts(temp_package)
                    self.extract_configs(temp_package)
                    self.extract_icons(temp_package)

                    self.update_descriptors(temp_package)

                except Exception:
                    # Roll back the stored package if any post-store step fails
                    self.delete_stored_package(stored_package)
                    raise

                else:
                    self.upload_images(temp_package, package_checksums)

    def run(self):
        """Thread entry point: run the update and report success/failure
        through the transaction's message log."""
        try:
            self._update_package()
            self.log.message(UpdateSuccess())

        except MessageException as e:
            self.log.message(e.msg)
            self.log.message(UpdateFailure())

        except Exception as e:
            self.log.exception(e)
            if str(e):
                self.log.message(UpdateError(str(e)))
            self.log.message(UpdateFailure())

        finally:
            # Consistency fix: release the streamed multipart temp files
            # (OnboardPackage already did this; without it the update path
            # leaked the uploaded parts on success).
            self.part_streamer.release_parts()

    def extract_package(self):
        """Extract multipart message from tarball.

        Returns:
            A list of file-backed packages produced from the upload.
        """
        desc_part = self.part_streamer.get_parts_by_name("descriptor")[0]

        # Invoke the move API to prevent the part streamer from attempting
        # to clean up (the file backed package will do that itself)
        desc_part.move(desc_part.f_out.name)

        package_name = desc_part.get_filename()
        package_path = desc_part.f_out.name

        extractor = extract.UploadPackageExtractor(self.log)
        file_backed_packages = extractor.create_packages_from_upload(
                package_name, package_path
                )

        return file_backed_packages

    def get_package_store(self, package):
        # Map the package's descriptor type ("vnfd"/"nsd") to its store
        return self.package_store_map[package.descriptor_type]

    def update_package(self, package):
        """Update the package in its store (storing it fresh if it was
        onboarded out of band) and return the stored package."""
        store = self.get_package_store(package)

        try:
            store.update_package(package)
        except rift.package.store.PackageNotFoundError:
            # If the package doesn't exist, then it is possible the descriptor was onboarded
            # out of band.  In that case, just store the package as is
            self.log.warning("Package not found, storing new package instead.")
            store.store_package(package)

        stored_package = store.get_package(package.descriptor_id)

        return stored_package

    def delete_stored_package(self, package):
        """Best-effort removal of a previously stored package."""
        self.log.info("Deleting stored package: %s", package)
        store = self.get_package_store(package)
        try:
            store.delete_package(package.descriptor_id)
        except Exception as e:
            self.log.warning("Failed to delete package from store: %s", str(e))

    def upload_images(self, package, package_checksums):
        """Upload every image in the package, using the package-provided
        checksum when available (computing it otherwise)."""
        image_file_map = rift.package.image.get_package_image_files(package)
        # Early return BEFORE opening any handles (original opened first).
        if not image_file_map:
            return

        name_hdl_map = {name: package.open(image_file_map[name]) for name in image_file_map}
        try:
            for image_name, image_hdl in name_hdl_map.items():
                image_file = image_file_map[image_name]
                if image_file in package_checksums:
                    image_checksum = package_checksums[image_file]
                else:
                    self.log.warning("checksum not provided for image %s.  Calculating checksum",
                                     image_file)
                    image_checksum = rift.package.checksums.checksum(
                            package.open(image_file_map[image_name])
                            )
                try:
                    self.uploader.upload_image(image_name, image_checksum, image_hdl)
                    self.uploader.upload_image_to_cloud_accounts(image_name, image_checksum)

                except image.ImageUploadError as e:
                    self.log.exception("Failed to upload image: %s", image_name)
                    # Bug fix: this is the update path, but the original
                    # raised the onboard-flavored OnboardImageUploadError.
                    raise MessageException(UpdateImageUploadError(str(e))) from e

        finally:
            # Close every opened image handle regardless of outcome
            _ = [image_hdl.close() for image_hdl in name_hdl_map.values()]

    def extract_charms(self, package):
        try:
            charm_extractor = rift.package.charm.PackageCharmExtractor(self.log)
            charm_extractor.extract_charms(package)
        except rift.package.charm.CharmExtractionError as e:
            raise MessageException(UpdateExtractionError()) from e

    def extract_scripts(self, package):
        try:
            script_extractor = rift.package.script.PackageScriptExtractor(self.log)
            script_extractor.extract_scripts(package)
        except rift.package.script.ScriptExtractionError as e:
            raise MessageException(UpdateExtractionError()) from e

    def extract_configs(self, package):
        try:
            config_extractor = rift.package.config.PackageConfigExtractor(self.log)
            config_extractor.extract_configs(package)
        except rift.package.config.ConfigExtractionError as e:
            raise MessageException(UpdateExtractionError()) from e

    def extract_icons(self, package):
        try:
            icon_extractor = rift.package.icon.PackageIconExtractor(self.log)
            icon_extractor.extract_icons(package)
        except rift.package.icon.IconExtractionError as e:
            raise MessageException(UpdateExtractionError()) from e

    def validate_package(self, package):
        """Validate package file checksums.

        Returns:
            The mapping of package files to their checksums.
        """
        checksum_validator = rift.package.package.PackageChecksumValidator(self.log)

        try:
            file_checksums = checksum_validator.validate(package)
        except rift.package.package.PackageFileChecksumError as e:
            raise MessageException(UpdateChecksumMismatch(e.filename)) from e
        except rift.package.package.PackageValidationError as e:
            raise MessageException(UpdateUnreadablePackage()) from e

        return file_checksums

    def update_descriptors(self, package):
        """Push the updated descriptor to the onboarder."""
        descriptor_msg = package.descriptor_msg

        self.log.message(UpdateDescriptorUpdate())

        try:
            self.onboarder.update(descriptor_msg)
        except onboard.UpdateError as e:
            raise MessageException(UpdateDescriptorError(package.descriptor_file)) from e
+
+
class OnboardPackage(threading.Thread):
    """Background thread that onboards a freshly uploaded descriptor
    package: validate, store, extract artifacts, onboard the descriptor,
    then upload images.
    """

    def __init__(self, log, loop, part_streamer, auth,
                 onboarder, uploader, package_store_map):
        super().__init__()
        self.log = log
        self.loop = loop
        self.part_streamer = part_streamer
        self.auth = auth
        self.onboarder = onboarder
        self.uploader = uploader
        self.package_store_map = package_store_map

        self.io_loop = tornado.ioloop.IOLoop.current()

    def _onboard_package(self):
        # Extract package could return multiple packages if
        # the package is converted
        for pkg in self.extract_package():
            with pkg as temp_package:
                package_checksums = self.validate_package(temp_package)
                stored_package = self.store_package(temp_package)

                try:
                    self.extract_charms(temp_package)
                    self.extract_scripts(temp_package)
                    self.extract_configs(temp_package)
                    self.extract_icons(temp_package)

                    self.onboard_descriptors(temp_package)

                except Exception:
                    # Roll back the stored package if any post-store step fails
                    self.delete_stored_package(stored_package)
                    raise

                else:
                    self.upload_images(temp_package, package_checksums)

    def run(self):
        """Thread entry point: run the onboard and report success/failure
        through the transaction's message log."""
        try:
            self._onboard_package()
            self.log.message(OnboardSuccess())

        except MessageException as e:
            self.log.message(e.msg)
            self.log.message(OnboardFailure())

        except Exception as e:
            self.log.exception(e)
            if str(e):
                self.log.message(OnboardError(str(e)))
            self.log.message(OnboardFailure())

        finally:
            # Always free the streamed multipart temp files
            self.part_streamer.release_parts()

    def extract_package(self):
        """Extract multipart message from tarball"""
        desc_part = self.part_streamer.get_parts_by_name("descriptor")[0]

        # Invoke the move API to prevent the part streamer from attempting
        # to clean up (the file backed package will do that itself)
        desc_part.move(desc_part.f_out.name)

        package_name = desc_part.get_filename()
        package_path = desc_part.f_out.name

        extractor = extract.UploadPackageExtractor(self.log)
        file_backed_packages = extractor.create_packages_from_upload(
                package_name, package_path
                )

        return file_backed_packages

    def get_package_store(self, package):
        # Map the package's descriptor type ("vnfd"/"nsd") to its store
        return self.package_store_map[package.descriptor_type]

    def store_package(self, package):
        """Store the package, falling back to an update when it already
        exists; return the stored package."""
        store = self.get_package_store(package)

        try:
            store.store_package(package)
        except rift.package.store.PackageExistsError as e:
            store.update_package(package)

        stored_package = store.get_package(package.descriptor_id)

        return stored_package

    def delete_stored_package(self, package):
        """Best-effort removal of a previously stored package."""
        self.log.info("Deleting stored package: %s", package)
        store = self.get_package_store(package)
        try:
            store.delete_package(package.descriptor_id)
        except Exception as e:
            self.log.warning("Failed to delete package from store: %s", str(e))

    def upload_images(self, package, package_checksums):
        """Upload every image in the package, using the package-provided
        checksum when available (computing it otherwise)."""
        image_file_map = rift.package.image.get_package_image_files(package)
        if not image_file_map:
            return

        name_hdl_map = {name: package.open(image_file_map[name]) for name in image_file_map}
        try:
            for image_name, image_hdl in name_hdl_map.items():
                image_file = image_file_map[image_name]
                if image_file in package_checksums:
                    image_checksum = package_checksums[image_file]
                else:
                    self.log.warning("checksum not provided for image %s.  Calculating checksum",
                                     image_file)
                    image_checksum = rift.package.checksums.checksum(
                            package.open(image_file_map[image_name])
                            )
                try:
                    self.uploader.upload_image(image_name, image_checksum, image_hdl)
                    self.uploader.upload_image_to_cloud_accounts(image_name, image_checksum)

                except image.ImageUploadError as e:
                    raise MessageException(OnboardImageUploadError()) from e

        finally:
            # Close every opened image handle regardless of outcome
            _ = [image_hdl.close() for image_hdl in name_hdl_map.values()]

    def extract_charms(self, package):
        try:
            charm_extractor = rift.package.charm.PackageCharmExtractor(self.log)
            charm_extractor.extract_charms(package)
        except rift.package.charm.CharmExtractionError as e:
            raise MessageException(OnboardExtractionError()) from e

    def extract_scripts(self, package):
        try:
            script_extractor = rift.package.script.PackageScriptExtractor(self.log)
            script_extractor.extract_scripts(package)
        except rift.package.script.ScriptExtractionError as e:
            raise MessageException(OnboardExtractionError()) from e

    def extract_configs(self, package):
        try:
            config_extractor = rift.package.config.PackageConfigExtractor(self.log)
            config_extractor.extract_configs(package)
        except rift.package.config.ConfigExtractionError as e:
            raise MessageException(OnboardExtractionError()) from e

    def extract_icons(self, package):
        try:
            icon_extractor = rift.package.icon.PackageIconExtractor(self.log)
            icon_extractor.extract_icons(package)
        except rift.package.icon.IconExtractionError as e:
            raise MessageException(OnboardExtractionError()) from e

    def validate_package(self, package):
        """Validate package file checksums and return the checksum map."""
        checksum_validator = rift.package.package.PackageChecksumValidator(self.log)

        try:
            file_checksums = checksum_validator.validate(package)
        except rift.package.package.PackageFileChecksumError as e:
            raise MessageException(OnboardChecksumMismatch(e.filename)) from e
        except rift.package.package.PackageValidationError as e:
            raise MessageException(OnboardUnreadablePackage()) from e

        return file_checksums

    def onboard_descriptors(self, package):
        """Push the descriptor to the onboarder."""
        descriptor_msg = package.descriptor_msg

        self.log.message(OnboardDescriptorOnboard())

        try:
            self.onboarder.onboard(descriptor_msg)
        except onboard.OnboardError as e:
            raise MessageException(OnboardDescriptorError(package.descriptor_file)) from e
+
+
class UploaderApplication(tornado.web.Application):
    """Tornado application hosting the descriptor package upload, update,
    and export HTTP API for the launchpad tasklet."""

    def __init__(self, tasklet):
        self.tasklet = tasklet
        self.accounts = []
        # transaction_id -> list of status/error message objects
        self.messages = collections.defaultdict(list)
        self.export_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/exports')

        manifest = tasklet.tasklet_info.get_pb_manifest()
        self.use_ssl = manifest.bootstrap_phase.rwsecurity.use_ssl
        self.ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
        self.ssl_key = manifest.bootstrap_phase.rwsecurity.key

        self.uploader = image.ImageUploader(self.log, self.loop, tasklet.dts)
        self.onboarder = onboard.DescriptorOnboarder(
                self.log, "127.0.0.1", 8008, self.use_ssl, self.ssl_cert, self.ssl_key
                )
        self.package_store_map = {
                "vnfd": self.tasklet.vnfd_package_store,
                "nsd": self.tasklet.nsd_package_store,
                }

        self.exporter = export.DescriptorPackageArchiveExporter(self.log)
        # Periodically remove stale export archives from the export dir
        self.loop.create_task(export.periodic_export_cleanup(self.log, self.loop, self.export_dir))

        attrs = dict(log=self.log, loop=self.loop)

        export_attrs = attrs.copy()
        export_attrs.update({
            "store_map": self.package_store_map,
            "exporter": self.exporter,
            "catalog_map": {
                "vnfd": self.vnfd_catalog,
                "nsd": self.nsd_catalog
                }
            })

        super(UploaderApplication, self).__init__([
            (r"/api/update", UpdateHandler, attrs),
            (r"/api/upload", UploadHandler, attrs),

            (r"/api/upload/([^/]+)/state", UploadStateHandler, attrs),
            (r"/api/update/([^/]+)/state", UpdateStateHandler, attrs),
            (r"/api/export/([^/]+)/state", export.ExportStateHandler, attrs),

            (r"/api/export/(nsd|vnfd)$", export.ExportHandler, export_attrs),
            # Bug fix: dots escaped so '.' matches a literal dot in the
            # archive suffix rather than any character.
            (r"/api/export/([^/]+\.tar\.gz)", tornado.web.StaticFileHandler, {
                "path": self.export_dir,
                }),
            (r"/api/export/([^/]+\.zip)", tornado.web.StaticFileHandler, {
                "path": self.export_dir,
                }),
            ])

    @property
    def log(self):
        return self.tasklet.log

    @property
    def loop(self):
        return self.tasklet.loop

    def get_logger(self, transaction_id):
        """Return a logger that records messages under transaction_id."""
        return message.Logger(self.log, self.messages[transaction_id])

    def onboard(self, part_streamer, transaction_id, auth=None):
        """Start a background OnboardPackage thread for the upload."""
        log = message.Logger(self.log, self.messages[transaction_id])

        OnboardPackage(
                log,
                self.loop,
                part_streamer,
                auth,
                self.onboarder,
                self.uploader,
                self.package_store_map,
                ).start()

    def update(self, part_streamer, transaction_id, auth=None):
        """Start a background UpdatePackage thread for the upload."""
        log = message.Logger(self.log, self.messages[transaction_id])

        UpdatePackage(
                log,
                self.loop,
                part_streamer,
                auth,
                self.onboarder,
                self.uploader,
                self.package_store_map,
                ).start()

    @property
    def vnfd_catalog(self):
        return self.tasklet.vnfd_catalog

    @property
    def nsd_catalog(self):
        return self.tasklet.nsd_catalog
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py
new file mode 100755
index 0000000..3dcd549
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwlaunchpad
+
class Tasklet(rift.tasklets.rwlaunchpad.LaunchpadTasklet):
    """Plugin entry point: thin re-export of LaunchpadTasklet from
    site-packages (RIFT-6485 rpmbuild/python2 workaround)."""
    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/CMakeLists.txt b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/CMakeLists.txt
new file mode 100644
index 0000000..549af43
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/CMakeLists.txt
@@ -0,0 +1,25 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+# Install the rwlaunchpad helper script into usr/bin (relative to the
+# install prefix) under the build's configured install component.
+install(PROGRAMS rwlaunchpad
+        DESTINATION usr/bin
+        COMPONENT ${INSTALL_COMPONENT}
+        )
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/Makefile b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# Include the Makefile.top that was located above
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/rwlaunchpad b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/rwlaunchpad
new file mode 100755
index 0000000..21a06b7
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/rwlaunchpad
@@ -0,0 +1,144 @@
+#!/bin/bash
+
+# Script details
+SCRIPTNAME=`basename $0`
+SCRIPT=$0
+SCRIPT_ARGS=${@}
+
+# Initialize some of the variables.
+# RIFT_LP_ADDR may be preset in the environment; default to localhost.
+if [ "$RIFT_LP_ADDR" = "" ]; then
+  RIFT_LP_ADDR="localhost"
+fi
+PKGS=()          # packages queued for upload (-u, repeatable)
+INSTANTIATE=0    # set to 1 when -i is given
+DESC_ID=""       # descriptor id used for instantiation
+NS_NAME=""       # network service name used for instantiation
+LOGGING=0        # set to 1 to redirect all output to a log file
+RIFT_LP_PKG_UPLOAD_URL="https://${RIFT_LP_ADDR}:4567/api/upload"
+
+######################################################################
+#  Function:usage                                                    #
+#           Prints usage                                             #
+######################################################################
+# Print the command-line help text to stdout.
+function usage() {
+  cat <<EOF
+  usage $SCRIPTNAME [-h] [-r launchpad-ip][-u upload-package][-i ns-service-name [-d descriptor-id]][-l]
+       -h : show this message
+       -r : launchpad ip address  -  defaults to RIFT_LP_ADDR enviroment variable
+       -u : upload package with the package name specified
+       -i : Instantiate a network service with network service name
+       -d : Instantiate a network service with the specified descriptor
+       -l : Log to file
+EOF
+}
+
+######################################################################
+#  Function:validate_args                                            #
+#           Validates the passed arguments                           #
+######################################################################
+# Validate the global option state; print usage and exit 1 on any error.
+function validate_args () {
+  # The launchpad address comes from RIFT_LP_ADDR or the -r option
+  # (the message previously said "-l", which is the logging flag).
+  if [ "$RIFT_LP_ADDR" = "" ]; then
+    echo "RIFT LP address must be specified - set RIFT_LP_ADDR or specify -r option"
+    usage
+    exit 1
+  fi
+  # At least one action (upload and/or instantiate) must be requested.
+  if [ "${#PKGS[@]}" -eq 0 -a "${INSTANTIATE}" -eq 0 ]; then
+    echo "One of -u or -i option must be specified"
+    usage
+    exit 1
+  fi
+  # Instantiation needs both the service name and the descriptor id.
+  if [ "${INSTANTIATE}" -eq 1 ]; then
+    if [ "${NS_NAME}" = "" -o "${DESC_ID}" = "" ]; then
+      echo "Must specify both descriptor id and ns service name when -i is specified"
+      usage
+      exit 1
+    fi
+  fi
+}
+
+######################################################################
+#  Function:upload_package                                           #
+#           Uploads the package passed as the first argument         #
+######################################################################
+# Upload one descriptor package to the launchpad.
+#   $1 - path of the package file to upload
+function upload_package() {
+  if [ -z "$1" ]; then
+    echo "upload_package: package name should be passed in as an argument"
+    usage
+    exit 1
+  fi
+  PACKAGE=$1
+  # --insecure skips TLS verification; the launchpad certificate is
+  # presumably self-signed -- confirm before hardening this.
+  curl --insecure -F "descriptor=@${PACKAGE}" ${RIFT_LP_PKG_UPLOAD_URL}
+}
+
+######################################################################
+#  Function:instantiate_ns                                           #
+#           Instantiates a network service                           #
+######################################################################
+# TODO: not yet implemented -- currently only prints a placeholder.
+function instantiate_ns() {
+  echo "instantiate_ns need implementation"
+}
+
+
+while getopts ":hl:r:u:i:n:" OPTION
+do
+    case $OPTION in
+        h)
+            usage
+            exit 1
+            ;;
+        r)
+            RIFT_LP_ADDR=$OPTARG
+            RIFT_LP_PKG_UPLOAD_URL="https://${RIFT_LP_ADDR}:4567/api/upload"
+            ;;
+        u)
+            PKGS+=($OPTARG)
+            ;;
+        i)
+            INSTANTIATE=1
+            NS_NAME=$OPTARG
+            ;;
+        n)
+            DESC_ID=$OPTARG
+            ;;
+        l)
+            LOGGING=1
+            ;;
+        *)
+            usage
+            exit 1
+            ;;
+    esac
+done
+
+shift $((OPTIND-1))
+
+validate_args
+
+if [ $LOGGING -eq 1 ]; then
+    LOGDIR="/tmp"
+    LOGFILE="$LOGDIR/$SCRIPTNAME-$DATE.log"
+    echo "Logging to file $LOGFILE"
+
+    # Redirect stdout ( > ) and stderr to file
+    # and store the STDOUT and STDERR for later use
+    exec 3>&1
+    exec 4>&2
+    exec >$LOGFILE
+    exec 2>&1
+fi
+
+echo "Started at $DATE"
+
+# Iterate through the packages and upload them
+for PKG in "${PKGS[@]}"
+do
+  echo "Uploading package $PKG"
+  upload_package $PKG
+  echo ""
+done
+
+if [ "${INSTANTIATE}" -eq 1 ]; then
+  instantiate_ns $DESC_ID
+fi
+
+echo "Ended at $DATE"
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/CMakeLists.txt b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/CMakeLists.txt
new file mode 100644
index 0000000..8f2e904
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/CMakeLists.txt
@@ -0,0 +1,38 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 04/22/2016
+# 
+
+# Register the launchpad tasklet python3 unit tests with the build's test
+# harness (rift_py3test is a RIFT.io cmake helper macro).
+rift_py3test(utest_serializer
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_serializer.py
+  )
+
+rift_py3test(utest_export
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_export.py
+  )
+
+rift_py3test(utest_onboard
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_onboard.py
+  )
+
+# Fixed: target name previously carried a stray ".py" suffix
+# (utest_package.py), inconsistent with the sibling registrations above.
+rift_py3test(utest_package
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_package.py
+  )
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/run_tests.sh b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/run_tests.sh
new file mode 100755
index 0000000..3efa1fc
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/run_tests.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+# Run every unit test in this directory.  The pattern is quoted so the
+# shell does not glob-expand it against local files before unittest sees it.
+python3 -m unittest discover --pattern="*.py"
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_export.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_export.py
new file mode 100755
index 0000000..7a787c7
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_export.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import asyncio
+import logging
+import io
+import os
+import sys
+import tarfile
+import tempfile
+import time
+import unittest
+import uuid
+import xmlrunner
+
+import rift.package.archive
+import rift.package.charm
+import rift.package.checksums
+import rift.package.config
+import rift.package.convert
+import rift.package.icon
+import rift.package.package
+import rift.package.script
+import rift.package.store
+
+from rift.tasklets.rwlaunchpad import export
+
+import gi
+gi.require_version('RwVnfdYang', '1.0')
+from gi.repository import (
+        RwVnfdYang,
+        VnfdYang,
+        )
+
+import utest_package
+
+
+class TestExport(utest_package.PackageTestCase):
+    """Tests for the descriptor package archive exporter."""
+
+    def setUp(self):
+        super().setUp()
+        self._exporter = export.DescriptorPackageArchiveExporter(self._log)
+        self._rw_vnfd_serializer = rift.package.convert.RwVnfdSerializer()
+        self._vnfd_serializer = rift.package.convert.VnfdSerializer()
+
+    def test_create_archive(self):
+        """create_archive() replaces the package descriptor with the given JSON."""
+        rw_vnfd_msg = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd(
+                id="new_id", name="new_name", description="new_description"
+                )
+        json_desc_str = self._rw_vnfd_serializer.to_json_string(rw_vnfd_msg)
+
+        vnfd_package = self.create_vnfd_package()
+        with io.BytesIO() as archive_hdl:
+            # NOTE(review): this return value is unused -- it is immediately
+            # shadowed by the read-back archive constructed below.
+            archive = self._exporter.create_archive(
+                    archive_hdl, vnfd_package, json_desc_str, self._rw_vnfd_serializer
+                    )
+
+            archive_hdl.seek(0)
+
+            # Create a new read-only archive from the archive handle and a package from that archive
+            archive = rift.package.archive.TarPackageArchive(self._log, archive_hdl)
+            package = archive.create_package()
+
+            # Ensure that the descriptor in the package has been overwritten
+            self.assertEqual(package.descriptor_msg, rw_vnfd_msg)
+
+    def test_export_package(self):
+        """export_package() writes a tarfile whose descriptor uses the plain
+        (non-RW) schema, dropping RW-only fields such as 'meta'."""
+        rw_vnfd_msg = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd(
+                id="new_id", name="new_name", description="new_description",
+                meta="THIS FIELD IS NOT IN REGULAR VNFD"
+                )
+        vnfd_msg = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+        vnfd_msg.from_dict(rw_vnfd_msg.as_dict(), ignore_missing_keys=True)
+
+        self.assertNotEqual(rw_vnfd_msg, vnfd_msg)
+
+        json_desc_str = self._rw_vnfd_serializer.to_json_string(rw_vnfd_msg)
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            vnfd_package = self.create_vnfd_package()
+            pkg_id = str(uuid.uuid4())
+            exported_path = self._exporter.export_package(
+                    vnfd_package, tmp_dir, pkg_id, json_desc_str, self._vnfd_serializer
+                    )
+
+            self.assertTrue(os.path.isfile(exported_path))
+            self.assertTrue(tarfile.is_tarfile(exported_path))
+
+            with open(exported_path, "rb") as archive_hdl:
+                archive = rift.package.archive.TarPackageArchive(self._log, archive_hdl)
+                package = archive.create_package()
+
+                self.assertEqual(package.descriptor_msg, vnfd_msg)
+
+    def test_export_cleanup(self):
+        """periodic_export_cleanup() removes only archives older than min_age_secs."""
+        loop = asyncio.get_event_loop()
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            archive_files = [tempfile.mkstemp(dir=tmp_dir, suffix=".tar.gz")[1] for _ in range(2)]
+
+            # Set the mtime on only one of the files to test the min_age_secs argument
+            times = (time.time(), time.time() - 10)
+            os.utime(archive_files[0], times)
+
+            task = loop.create_task(
+                    export.periodic_export_cleanup(
+                        self._log, loop, tmp_dir, period_secs=.01, min_age_secs=5
+                        )
+                    )
+            loop.run_until_complete(asyncio.sleep(.05, loop=loop))
+
+            # Surface any exception the cleanup task raised internally
+            if task.done() and task.exception() is not None:
+                raise task.exception()
+
+            # The periodic task should still be running
+            self.assertFalse(task.done())
+
+            # The file aged past min_age_secs was removed; the fresh one kept
+            self.assertFalse(os.path.exists(archive_files[0]))
+            self.assertTrue(os.path.exists(archive_files[1]))
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_onboard.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_onboard.py
new file mode 100755
index 0000000..871132f
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_onboard.py
@@ -0,0 +1,294 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import asyncio
+import base64
+import concurrent.futures
+import io
+import logging
+import os
+import sys
+import tornado.testing
+import tornado.web
+import unittest
+import uuid
+import xmlrunner
+
+from rift.package import convert
+from rift.tasklets.rwlaunchpad import onboard
+import rift.test.dts
+
+import gi
+gi.require_version('NsdYang', '1.0')
+gi.require_version('VnfdYang', '1.0')
+
+from gi.repository import (
+        NsdYang,
+        VnfdYang,
+        )
+
+
+class RestconfDescriptorHandler(tornado.web.RequestHandler):
+    DESC_SERIALIZER_MAP = {
+            "nsd": convert.NsdSerializer(),
+            "vnfd": convert.VnfdSerializer(),
+            }
+
+    class AuthError(Exception):
+        pass
+
+
+    class ContentTypeError(Exception):
+        pass
+
+
+    class RequestBodyError(Exception):
+        pass
+
+
+    def initialize(self, log, auth, info):
+        self._auth = auth
+        # The superclass has self._log already defined so use a different name
+        self._logger = log
+        self._info = info
+        self._logger.debug('Created restconf descriptor handler')
+
+    def _verify_auth(self):
+        if self._auth is None:
+            return None
+
+        auth_header = self.request.headers.get('Authorization')
+        if auth_header is None or not auth_header.startswith('Basic '):
+            self.set_status(401)
+            self.set_header('WWW-Authenticate', 'Basic realm=Restricted')
+            self._transforms = []
+            self.finish()
+
+            msg = "Missing Authorization header"
+            self._logger.error(msg)
+            raise RestconfDescriptorHandler.AuthError(msg)
+
+        auth_header = auth_header.encode('ascii')
+        auth_decoded = base64.decodebytes(auth_header[6:]).decode()
+        login, password = auth_decoded.split(':', 2)
+        login = login
+        password = password
+        is_auth = ((login, password) == self._auth)
+
+        if not is_auth:
+            self.set_status(401)
+            self.set_header('WWW-Authenticate', 'Basic realm=Restricted')
+            self._transforms = []
+            self.finish()
+
+            msg = "Incorrect username and password in auth header: got {}, expected {}".format(
+                    (login, password), self._auth
+                    )
+            self._logger.error(msg)
+            raise RestconfDescriptorHandler.AuthError(msg)
+
+    def _verify_content_type_header(self):
+        content_type_header = self.request.headers.get('content-type')
+        if content_type_header is None:
+            self.set_status(415)
+            self._transforms = []
+            self.finish()
+
+            msg = "Missing content-type header"
+            self._logger.error(msg)
+            raise RestconfDescriptorHandler.ContentTypeError(msg)
+
+        if content_type_header != "application/vnd.yang.data+json":
+            self.set_status(415)
+            self._transforms = []
+            self.finish()
+
+            msg = "Unsupported content type: %s" % content_type_header
+            self._logger.error(msg)
+            raise RestconfDescriptorHandler.ContentTypeError(msg)
+
+    def _verify_headers(self):
+        self._verify_auth()
+        self._verify_content_type_header()
+
+    def _verify_request_body(self, descriptor_type):
+        if descriptor_type not in RestconfDescriptorHandler.DESC_SERIALIZER_MAP:
+            raise ValueError("Unsupported descriptor type: %s" % descriptor_type)
+
+        body = self.request.body
+        bytes_hdl = io.BytesIO(body)
+
+        serializer = RestconfDescriptorHandler.DESC_SERIALIZER_MAP[descriptor_type]
+
+        try:
+            message = serializer.from_file_hdl(bytes_hdl, ".json")
+        except convert.SerializationError as e:
+            self.set_status(400)
+            self._transforms = []
+            self.finish()
+
+            msg = "Descriptor request body not valid"
+            self._logger.error(msg)
+            raise RestconfDescriptorHandler.RequestBodyError() from e
+
+        self._info.last_request_message = message
+
+        self._logger.debug("Received a valid descriptor request")
+
+    def put(self, descriptor_type):
+        self._info.last_descriptor_type = descriptor_type
+        self._info.last_method = "PUT"
+
+        try:
+            self._verify_headers()
+        except (RestconfDescriptorHandler.AuthError,
+                RestconfDescriptorHandler.ContentTypeError):
+            return None
+
+        try:
+            self._verify_request_body(descriptor_type)
+        except RestconfDescriptorHandler.RequestBodyError:
+            return None
+
+        self.write("Response doesn't matter?")
+
+    def post(self, descriptor_type):
+        self._info.last_descriptor_type = descriptor_type
+        self._info.last_method = "POST"
+
+        try:
+            self._verify_headers()
+        except (RestconfDescriptorHandler.AuthError,
+                RestconfDescriptorHandler.ContentTypeError):
+            return None
+
+        try:
+            self._verify_request_body(descriptor_type)
+        except RestconfDescriptorHandler.RequestBodyError:
+            return None
+
+        self.write("Response doesn't matter?")
+
+
+class HandlerInfo(object):
+    def __init__(self):
+        self.last_request_message = None
+        self.last_descriptor_type = None
+        self.last_method = None
+
+
+class OnboardTestCase(tornado.testing.AsyncHTTPTestCase):
+    """Exercises DescriptorOnboarder against a local restconf test double."""
+
+    # Credentials the test server expects (see RestconfDescriptorHandler)
+    AUTH = ("admin", "admin")
+    def setUp(self):
+        self._log = logging.getLogger(__file__)
+        self._loop = asyncio.get_event_loop()
+
+        self._handler_info = HandlerInfo()
+        super().setUp()
+        self._port = self.get_http_port()
+        self._onboarder = onboard.DescriptorOnboarder(
+                log=self._log, port=self._port
+                )
+
+    def get_new_ioloop(self):
+        # Bridge tornado onto the asyncio event loop used by the tests
+        return tornado.platform.asyncio.AsyncIOMainLoop()
+
+    def get_app(self):
+        # Route nsd/vnfd descriptor URLs to the recording test handler
+        attrs = dict(auth=OnboardTestCase.AUTH, log=self._log, info=self._handler_info)
+        return tornado.web.Application([
+            (r"/api/config/.*/(nsd|vnfd)", RestconfDescriptorHandler, attrs),
+            ])
+
+    @rift.test.dts.async_test
+    def test_onboard_nsd(self):
+        """Onboarding an NSD arrives as a POST with the same message."""
+        nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+        yield from self._loop.run_in_executor(None, self._onboarder.onboard, nsd_msg)
+        self.assertEqual(self._handler_info.last_request_message, nsd_msg)
+        self.assertEqual(self._handler_info.last_descriptor_type, "nsd")
+        self.assertEqual(self._handler_info.last_method, "POST")
+
+    @rift.test.dts.async_test
+    def test_update_nsd(self):
+        """Updating an NSD arrives as a PUT with the same message."""
+        nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+        yield from self._loop.run_in_executor(None, self._onboarder.update, nsd_msg)
+        self.assertEqual(self._handler_info.last_request_message, nsd_msg)
+        self.assertEqual(self._handler_info.last_descriptor_type, "nsd")
+        self.assertEqual(self._handler_info.last_method, "PUT")
+
+    @rift.test.dts.async_test
+    def test_bad_descriptor_type(self):
+        """A catalog message (not a descriptor) is rejected with TypeError."""
+        nsd_msg = NsdYang.YangData_Nsd_NsdCatalog()
+        with self.assertRaises(TypeError):
+            yield from self._loop.run_in_executor(None, self._onboarder.update, nsd_msg)
+
+        with self.assertRaises(TypeError):
+            yield from self._loop.run_in_executor(None, self._onboarder.onboard, nsd_msg)
+
+    @rift.test.dts.async_test
+    def test_bad_port(self):
+        """Connection failures surface as OnboardError/UpdateError."""
+        # Use a port not used by the instantiated server
+        new_port = self._port - 1
+        self._onboarder.port = new_port
+        nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+
+        with self.assertRaises(onboard.OnboardError):
+            yield from self._loop.run_in_executor(None, self._onboarder.onboard, nsd_msg)
+
+        with self.assertRaises(onboard.UpdateError):
+            yield from self._loop.run_in_executor(None, self._onboarder.update, nsd_msg)
+
+    @rift.test.dts.async_test
+    def test_timeout(self):
+        """Request timeouts surface as OnboardError/UpdateError."""
+        # Set the timeout to something minimal to speed up test
+        self._onboarder.timeout = .1
+
+        nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+
+        # Force the request to timeout by running the call synchronously so the
+        with self.assertRaises(onboard.OnboardError):
+            self._onboarder.onboard(nsd_msg)
+
+        # Force the request to timeout by running the call synchronously so the
+        with self.assertRaises(onboard.UpdateError):
+            self._onboarder.update(nsd_msg)
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_package.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_package.py
new file mode 100755
index 0000000..1efd2df
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_package.py
@@ -0,0 +1,480 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import logging
+import io
+import json
+import os
+import sys
+import tarfile
+import tempfile
+import unittest
+import xmlrunner
+import yaml
+
+import rift.package.archive
+import rift.package.package
+import rift.package.charm
+import rift.package.icon
+import rift.package.script
+import rift.package.config
+import rift.package.store
+import rift.package.checksums
+import rift.package.cloud_init
+
+
+import gi
+gi.require_version('RwpersonDbYang', '1.0')
+gi.require_version('RwYang', '1.0')
+
+from gi.repository import (
+        RwpersonDbYang,
+        RwYang,
+        )
+
+
+# Minimal NSD/VNFD descriptor documents (as bytes) used to populate the
+# in-memory test archives below.
+nsd_yaml = b"""nsd:nsd-catalog:
+  nsd:nsd:
+  - nsd:id: gw_corpA
+    nsd:name: gw_corpA
+    nsd:description: Gateways to access as corpA to PE1 and PE2
+"""
+
+vnfd_yaml = b"""vnfd:vnfd-catalog:
+  vnfd:vnfd:
+  - vnfd:id: gw_corpA_vnfd
+    vnfd:name: gw_corpA_vnfd
+    vnfd:description: Gateways to access as corpA to PE1 and PE2
+"""
+
+# File names under which the descriptors are stored inside test archives
+nsd_filename = "gw_corpA__nsd.yaml"
+vnfd_filename = "gw_corpA__vnfd.yaml"
+
+
+def file_hdl_md5(file_hdl):
+    # Checksum an open file handle via the shared package helper
+    # (named "md5" here; actual algorithm is whatever
+    # rift.package.checksums.checksum implements -- confirm if it matters).
+    return rift.package.checksums.checksum(file_hdl)
+
+
+class ArchiveTestCase(unittest.TestCase):
+    def setUp(self):
+        self._log = logging.getLogger()
+
+        self._tar_file_hdl = io.BytesIO()
+        self._tar = tarfile.open(fileobj=self._tar_file_hdl, mode="w|gz")
+
+        self._nsd_yaml_hdl = io.BytesIO(nsd_yaml)
+        self._vnfd_yaml_hdl = io.BytesIO(vnfd_yaml)
+
+    def tearDown(self):
+        self._nsd_yaml_hdl.close()
+        self._vnfd_yaml_hdl.close()
+        self._tar.close()
+        self._tar_file_hdl.close()
+
+    def create_tar_package_archive(self):
+        self._tar.close()
+        self._tar_file_hdl.flush()
+        self._tar_file_hdl.seek(0)
+        archive = rift.package.package.TarPackageArchive(
+                log=self._log,
+                tar_file_hdl=self._tar_file_hdl,
+                )
+
+        return archive
+
+    def add_tarinfo(self, name, file_hdl, mode=0o777):
+        tarinfo = tarfile.TarInfo(name)
+        tarinfo.size = len(file_hdl.read())
+        assert tarinfo.size > 0
+        file_hdl.seek(0)
+        self._tar.addfile(tarinfo, file_hdl)
+
+    def add_tarinfo_dir(self, name):
+        tarinfo = tarfile.TarInfo(name)
+        tarinfo.type = tarfile.DIRTYPE
+        self._tar.addfile(tarinfo)
+
+    def add_nsd_yaml(self):
+        self.add_tarinfo(nsd_filename, io.BytesIO(nsd_yaml))
+
+    def add_vnfd_yaml(self):
+        self.add_tarinfo(vnfd_filename, io.BytesIO(vnfd_yaml))
+
+
+class PackageTestCase(ArchiveTestCase):
+    """Extends the archive fixture with helpers that build complete packages."""
+
+    def create_nsd_package(self):
+        # Build a package containing just the NSD descriptor
+        self.add_nsd_yaml()
+        archive = self.create_tar_package_archive()
+        package = archive.create_package()
+
+        return package
+
+    def create_vnfd_package(self):
+        # Build a package containing just the VNFD descriptor
+        self.add_vnfd_yaml()
+        archive = self.create_tar_package_archive()
+        package = archive.create_package()
+
+        return package
+
+
+class TestCreateArchive(ArchiveTestCase):
+    def test_create_tar_archive(self):
+        self.add_nsd_yaml()
+        archive = self.create_tar_package_archive()
+        self.assertEquals(set(archive.filenames), {nsd_filename})
+
+    def test_nsd_tar_archive(self):
+        #Write the NSD YAML to the tar file
+        self.add_nsd_yaml()
+
+        archive = self.create_tar_package_archive()
+        with archive.open_file(nsd_filename) as nsd_hdl:
+            nsd_bytes = nsd_hdl.read()
+
+        self.assertEquals(nsd_bytes, nsd_yaml)
+
+
+class TestPackage(PackageTestCase):
+    def create_vnfd_package_archive(self, package, hdl):
+        # Create an archive from a package
+        archive = rift.package.archive.TarPackageArchive.from_package(
+                self._log, package, hdl,
+                )
+        # Closing the archive writes any closing bytes to the file handle
+        archive.close()
+        hdl.seek(0)
+
+        return archive
+
+    def test_create_nsd_package_from_archive(self):
+        package = self.create_nsd_package()
+        self.assertTrue(isinstance(package, rift.package.package.NsdPackage))
+
+        json_str = package.json_descriptor
+        desc_dict = json.loads(json_str)
+        self.assertIn("nsd:nsd-catalog", desc_dict)
+
+    def test_create_vnfd_package_from_archive(self):
+        package = self.create_vnfd_package()
+        self.assertTrue(isinstance(package, rift.package.package.VnfdPackage))
+
+        json_str = package.json_descriptor
+        desc_dict = json.loads(json_str)
+        self.assertIn("vnfd:vnfd-catalog", desc_dict)
+
+    def test_create_vnfd_archive_from_package(self):
+        package = self.create_vnfd_package()
+        hdl = io.BytesIO()
+        self.create_vnfd_package_archive(package, hdl)
+
+        # Ensure that the archive created was valid
+        with tarfile.open(fileobj=hdl, mode='r|gz'):
+            pass
+
+    def test_round_trip_vnfd_package_from_archive(self):
+        package = self.create_vnfd_package()
+        hdl = io.BytesIO()
+        self.create_vnfd_package_archive(package, hdl)
+
+        archive = rift.package.archive.TarPackageArchive(self._log, hdl)
+        def md5(file_hdl):
+            return rift.package.checksums.checksum(file_hdl)
+
+        # Create the package from the archive and validate file checksums and modes
+        new_package = archive.create_package()
+
+        self.assertEqual(package.files, new_package.files)
+        self.assertEqual(type(package), type(new_package))
+
+        for filename in package.files:
+            pkg_file = package.open(filename)
+            new_pkg_file = new_package.open(filename)
+            self.assertEqual(md5(pkg_file), md5(new_pkg_file))
+
+    def test_create_nsd_package_from_file(self):
+        """A bare NSD YAML file handle yields an NsdPackage whose stored file
+        round-trips to the same YAML content."""
+        nsd_file_name = "asdf_nsd.yaml"
+        hdl = io.BytesIO(nsd_yaml)
+        hdl.name = nsd_file_name
+
+        package = rift.package.package.DescriptorPackage.from_descriptor_file_hdl(
+                self._log, hdl
+                )
+        self.assertTrue(isinstance(package, rift.package.package.NsdPackage))
+
+        with package.open(nsd_file_name) as nsd_hdl:
+            nsd_data = nsd_hdl.read()
+            # NOTE(review): assertEquals is a deprecated alias of assertEqual,
+            # and yaml.load without an explicit Loader is deprecated/unsafe in
+            # newer PyYAML -- consider assertEqual + yaml.safe_load.
+            self.assertEquals(yaml.load(nsd_data), yaml.load(nsd_yaml))
+
+    def test_create_vnfd_package_from_file(self):
+        """A bare VNFD YAML file handle yields a VnfdPackage whose stored file
+        round-trips to the same YAML content."""
+        vnfd_file_name = "asdf_vnfd.yaml"
+        hdl = io.BytesIO(vnfd_yaml)
+        hdl.name = vnfd_file_name
+
+        package = rift.package.package.DescriptorPackage.from_descriptor_file_hdl(
+                self._log, hdl
+                )
+        self.assertTrue(isinstance(package, rift.package.package.VnfdPackage))
+
+        with package.open(vnfd_file_name) as vnfd_hdl:
+            vnfd_data = vnfd_hdl.read()
+            # NOTE(review): assertEquals/yaml.load -- same deprecation note as
+            # in test_create_nsd_package_from_file.
+            self.assertEquals(yaml.load(vnfd_data), yaml.load(vnfd_yaml))
+
+
+class TestPackageCharmExtractor(PackageTestCase):
+    """Tests for extracting juju charm directories from a VNFD package."""
+
+    def add_charm_dir(self, charm_name):
+        # Build a minimal fake charm layout (charms/trusty/<name>/actions.yaml)
+        # inside the package tarball.
+        charm_dir = "charms/trusty/{}".format(charm_name)
+        charm_file = "{}/actions.yaml".format(charm_dir)
+        charm_text = b"THIS IS A FAKE CHARM"
+        self.add_tarinfo_dir(charm_dir)
+        self.add_tarinfo(charm_file, io.BytesIO(charm_text))
+
+    def test_extract_charm(self):
+        """Extracting charms materializes the charm directory on disk."""
+        charm_name = "charm_a"
+        self.add_charm_dir(charm_name)
+        package = self.create_vnfd_package()
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            extractor = rift.package.charm.PackageCharmExtractor(self._log, tmp_dir)
+            extractor.extract_charms(package)
+
+            charm_dir = extractor.get_extracted_charm_dir(package.descriptor_id, charm_name)
+            self.assertTrue(os.path.exists(charm_dir))
+            self.assertTrue(os.path.isdir(charm_dir))
+
+
+class TestPackageIconExtractor(PackageTestCase):
+    """Tests for extracting icon files from a VNFD package."""
+
+    def add_icon_file(self, icon_name):
+        # Place a fake icon under the icons/ directory of the tarball.
+        icon_file = "icons/{}".format(icon_name)
+        icon_text = b"png file bytes"
+        self.add_tarinfo(icon_file, io.BytesIO(icon_text))
+
+    def test_extract_icon(self):
+        """Extracting icons materializes the icon file on disk."""
+        icon_name = "icon_a"
+        self.add_icon_file(icon_name)
+        package = self.create_vnfd_package()
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            extractor = rift.package.icon.PackageIconExtractor(self._log, tmp_dir)
+            extractor.extract_icons(package)
+
+            icon_file = extractor.get_extracted_icon_path(
+                    package.descriptor_type, package.descriptor_id, icon_name
+                    )
+            self.assertTrue(os.path.exists(icon_file))
+            self.assertTrue(os.path.isfile(icon_file))
+
+
+class TestPackageScriptExtractor(PackageTestCase):
+    """Tests for extracting scripts from a VNFD package."""
+
+    def add_script_file(self, script_name):
+        # Script is added with non-executable mode 0o666; presumably the
+        # extractor is responsible for any permission fix-up -- TODO confirm.
+        script_file = "scripts/{}".format(script_name)
+        script_text = b"""#!/usr/bin/python
+        print("hi")
+        """
+        self.add_tarinfo(script_file, io.BytesIO(script_text), mode=0o666)
+
+    def test_extract_script(self):
+        """Extracting scripts materializes the script file on disk."""
+        script_name = "add_corporation.py"
+        self.add_script_file(script_name)
+        package = self.create_vnfd_package()
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            extractor = rift.package.script.PackageScriptExtractor(self._log, tmp_dir)
+            extractor.extract_scripts(package)
+
+            script_dir = extractor.get_extracted_script_path(package.descriptor_id, script_name)
+            self.assertTrue(os.path.exists(script_dir))
+            self.assertTrue(os.path.isfile(script_dir))
+
+class TestPackageCloudInitExtractor(PackageTestCase):
+    """Tests for reading cloud-init scripts out of a VNFD package."""
+
+    def add_cloud_init_file(self, cloud_init_filename):
+        script_file = "cloud_init/{}".format(cloud_init_filename)
+        script_text = b"""#cloud-config"""
+        self.add_tarinfo(script_file, io.BytesIO(script_text), mode=0o666)
+
+    def test_read_cloud_init(self):
+        """read_script returns the cloud-init file content as text."""
+        script_name = "testVM_cloud_init.cfg"
+        valid_script_text = "#cloud-config"
+        self.add_cloud_init_file(script_name)
+        package = self.create_vnfd_package()
+
+        extractor = rift.package.cloud_init.PackageCloudInitExtractor(self._log)
+        cloud_init_contents = extractor.read_script(package, script_name)
+
+        self.assertEquals(cloud_init_contents, valid_script_text)
+
+    def test_cloud_init_file_missing(self):
+        """read_script raises CloudInitExtractionError for a missing file."""
+        script_name = "testVM_cloud_init.cfg"
+        package = self.create_vnfd_package()
+
+        extractor = rift.package.cloud_init.PackageCloudInitExtractor(self._log)
+
+        with self.assertRaises(rift.package.cloud_init.CloudInitExtractionError):
+            extractor.read_script(package, script_name)
+
+class TestPackageConfigExtractor(PackageTestCase):
+    """Tests for extracting NS/VNF config files from an NSD package."""
+
+    def add_ns_config_file(self, nsd_id):
+        # NS config files are keyed by NSD id: ns_config/<nsd_id>.yaml
+        config_file = "ns_config/{}.yaml".format(nsd_id)
+        config_text = b""" ns_config """
+        self.add_tarinfo(config_file, io.BytesIO(config_text), mode=0o666)
+
+        return config_file
+
+    def add_vnf_config_file(self, vnfd_id, member_vnf_index):
+        # VNF config files are keyed by VNFD id and member index:
+        # vnf_config/<vnfd_id>_<index>.yaml
+        config_file = "vnf_config/{}_{}.yaml".format(vnfd_id, member_vnf_index)
+        config_text = b""" vnf_config """
+        self.add_tarinfo(config_file, io.BytesIO(config_text), mode=0o666)
+
+        return config_file
+
+    def test_extract_config(self):
+        """Extracting configs materializes both NS and VNF config files."""
+        ns_config_file = self.add_ns_config_file("nsd_id")
+        vnf_config_file = self.add_vnf_config_file("vnfd_id", 1)
+        package = self.create_nsd_package()
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            extractor = rift.package.config.PackageConfigExtractor(self._log, tmp_dir)
+            extractor.extract_configs(package)
+
+            dest_ns_config_file = extractor.get_extracted_config_path(package.descriptor_id, ns_config_file)
+            dest_vnf_config_file = extractor.get_extracted_config_path(package.descriptor_id, vnf_config_file)
+            self.assertTrue(os.path.isfile(dest_ns_config_file))
+            self.assertTrue(os.path.isfile(dest_vnf_config_file))
+
+
+class TestPackageValidator(PackageTestCase):
+    """Tests for PackageChecksumValidator against checksums.txt files."""
+
+    def setUp(self):
+        super().setUp()
+        self._validator = rift.package.package.PackageChecksumValidator(self._log)
+
+    def create_checksum_file(self, file_md5_map):
+        # Write a checksums.txt in "<md5>  <filename>" format (md5sum style).
+        checksum_hdl = io.BytesIO()
+        for file_name, md5 in file_md5_map.items():
+            checksum_hdl.write("{}  {}\n".format(md5, file_name).encode())
+
+        checksum_hdl.flush()
+        checksum_hdl.seek(0)
+
+        self.add_tarinfo("checksums.txt", checksum_hdl)
+        # NOTE(review): this adds a second tar member with an empty-named,
+        # default TarInfo -- looks like leftover debris from before
+        # add_tarinfo existed; verify whether it can be removed.
+        self._tar.addfile(tarfile.TarInfo(), checksum_hdl)
+
+    def create_nsd_package_with_checksum(self):
+        self.create_checksum_file(
+                {nsd_filename: file_hdl_md5(io.BytesIO(nsd_yaml))}
+                )
+        package = self.create_nsd_package()
+        return package
+
+    def test_package_no_checksum(self):
+        """A package without checksums.txt validates to an empty mapping."""
+        package = self.create_nsd_package()
+
+        # For now, a missing checksum file will be supported.
+        # No files will be validated.
+        validated_files = self._validator.validate(package)
+        self.assertEquals(validated_files, {})
+
+    def test_package_with_checksum(self):
+        """Files listed in checksums.txt are validated."""
+        package = self.create_nsd_package_with_checksum()
+        validated_files = self._validator.validate(package)
+        self.assertEquals(list(validated_files.keys()), [nsd_filename])
+
+
+class TestPackageStore(PackageTestCase):
+    """Tests for PackageFilesystemStore store/load/delete semantics."""
+
+    def create_store(self, root_dir):
+        store = rift.package.store.PackageFilesystemStore(self._log, root_dir)
+        return store
+
+    def create_and_store_package(self, store):
+        package = self.create_nsd_package()
+        store.store_package(package)
+
+        return package
+
+    def test_store_package(self):
+        """A stored package can be fetched back with the same files and type."""
+        with tempfile.TemporaryDirectory() as root_dir:
+            store = self.create_store(root_dir)
+            package = self.create_and_store_package(store)
+            new_package = store.get_package(package.descriptor_id)
+            self.assertEquals(new_package.files, package.files)
+            self.assertEquals(type(new_package), type(package))
+
+    def test_store_reload_package(self):
+        """A fresh store over the same root dir rediscovers stored packages."""
+        with tempfile.TemporaryDirectory() as root_dir:
+            store = self.create_store(root_dir)
+            package = self.create_and_store_package(store)
+
+            new_store = self.create_store(root_dir)
+            new_package = new_store.get_package(package.descriptor_id)
+
+            self.assertEquals(new_package.files, package.files)
+            self.assertEquals(type(new_package), type(package))
+
+    def test_delete_package(self):
+        """Fetching a deleted package raises PackageStoreError."""
+        with tempfile.TemporaryDirectory() as root_dir:
+            store = self.create_store(root_dir)
+            package = self.create_and_store_package(store)
+
+            store.get_package(package.descriptor_id)
+            store.delete_package(package.descriptor_id)
+
+            with self.assertRaises(rift.package.store.PackageStoreError):
+                store.get_package(package.descriptor_id)
+
+    def test_store_exist_package(self):
+        """Storing an already-stored package raises PackageStoreError."""
+        with tempfile.TemporaryDirectory() as root_dir:
+            store = self.create_store(root_dir)
+            package = self.create_and_store_package(store)
+
+            with self.assertRaises(rift.package.store.PackageStoreError):
+                store.store_package(package)
+
+
+class TestTemporaryPackage(PackageTestCase):
+    """Tests that TemporaryPackage removes its backing file on context exit."""
+
+    def test_temp_package(self):
+        # Use delete=False so only TemporaryPackage (not NamedTemporaryFile)
+        # is responsible for removing the file.
+        self._tar_file_hdl = tempfile.NamedTemporaryFile(delete=False)
+        self._tar = tarfile.open(fileobj=self._tar_file_hdl, mode="w|gz")
+
+        self.assertTrue(os.path.exists(self._tar_file_hdl.name))
+
+        package = self.create_nsd_package()
+        with rift.package.package.TemporaryPackage(self._log, package, self._tar_file_hdl) as temp_pkg:
+            self.assertTrue(package is temp_pkg)
+            self.assertEquals(package.files, temp_pkg.files)
+
+        # The backing tar file must be gone once the context manager exits.
+        self.assertFalse(os.path.exists(self._tar_file_hdl.name))
+
+
+def main(argv=sys.argv[1:]):
+    """Run this module's unit tests.
+
+    Arguments:
+        argv - command-line arguments (defaults to sys.argv[1:])
+
+    Flags:
+        -v/--verbose   - enable debug-level logging
+        -n/--no-runner - run without the XML test runner
+    """
+    logging.basicConfig(format='TEST %(message)s')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unknown = parser.parse_known_args(argv)
+
+    # Only build the XML runner when it will actually be used.  Previously it
+    # was constructed unconditionally, so a missing RIFT_MODULE_TEST env var
+    # raised KeyError even when --no-runner was requested.
+    runner = None
+    if not args.no_runner:
+        runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_serializer.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_serializer.py
new file mode 100755
index 0000000..af8e1f8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_serializer.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import logging
+import io
+import os
+import sys
+import tempfile
+import unittest
+import xmlrunner
+
+from rift.package.convert import (
+        ProtoMessageSerializer,
+        UnknownExtensionError,
+        SerializationError,
+        )
+
+import gi
+gi.require_version('RwpersonDbYang', '1.0')
+gi.require_version('RwYang', '1.0')
+
+from gi.repository import (
+        RwpersonDbYang,
+        RwYang,
+        )
+
+class TestSerializer(unittest.TestCase):
+    """Tests for ProtoMessageSerializer round-tripping xml/yaml/json."""
+
+    def setUp(self):
+        self._serializer = ProtoMessageSerializer(
+                RwpersonDbYang,
+                RwpersonDbYang.Person
+                )
+
+        # A sample message plus a yang model loaded with its schema, used to
+        # produce the reference xml/yaml/json strings below.
+        self._sample_person = RwpersonDbYang.Person(name="Fred")
+        self._model = RwYang.model_create_libncx()
+        self._model.load_schema_ypbc(RwpersonDbYang.get_schema())
+
+    def test_from_xml_file(self):
+        """Deserializing an .xml file handle recovers the sample message."""
+        sample_person_xml = self._sample_person.to_xml_v2(self._model)
+        with io.StringIO(sample_person_xml) as file_hdl:
+            person = self._serializer.from_file_hdl(file_hdl, ".xml")
+            self.assertEqual(person, self._sample_person)
+
+    def test_from_yaml_file(self):
+        """Deserializing a .yml file handle recovers the sample message."""
+        sample_person_yaml = self._sample_person.to_yaml(self._model)
+        with io.StringIO(sample_person_yaml) as file_hdl:
+
+            person = self._serializer.from_file_hdl(file_hdl, ".yml")
+            self.assertEqual(person, self._sample_person)
+
+    def test_from_json_file(self):
+        """Deserializing a .json file handle recovers the sample message."""
+        sample_person_json = self._sample_person.to_json(self._model)
+        with io.StringIO(sample_person_json) as file_hdl:
+
+            person = self._serializer.from_file_hdl(file_hdl, ".json")
+            self.assertEqual(person, self._sample_person)
+
+    def test_unknown_file_extension(self):
+        """An unrecognized extension raises UnknownExtensionError."""
+        with io.StringIO("asdf") as file_hdl:
+            with self.assertRaises(UnknownExtensionError):
+                self._serializer.from_file_hdl(file_hdl, ".foo")
+
+    def test_raises_serialization_error(self):
+        """Malformed content for the given extension raises SerializationError."""
+        # '</foo>' is deliberately not valid JSON.
+        with io.StringIO('</foo>') as file_hdl:
+            with self.assertRaises(SerializationError):
+                person = self._serializer.from_file_hdl(file_hdl, ".json")
+                print(person)
+
+    def test_to_json_string(self):
+        """to_json_string output parses back into an equal message."""
+        json_str = self._serializer.to_json_string(self._sample_person)
+
+        person = RwpersonDbYang.Person.from_json(self._model, json_str)
+        self.assertEqual(person, self._sample_person)
+
+    def test_to_json_string_invalid_type(self):
+        """Serializing a message of the wrong type raises TypeError."""
+        with self.assertRaises(TypeError):
+            self._serializer.to_json_string(RwpersonDbYang.FlatPerson(name="bob"))
+
+
+def main(argv=sys.argv[1:]):
+    """Run this module's unit tests.
+
+    Arguments:
+        argv - command-line arguments (defaults to sys.argv[1:])
+
+    Flags:
+        -v/--verbose   - enable debug-level logging
+        -n/--no-runner - run without the XML test runner
+    """
+    logging.basicConfig(format='TEST %(message)s')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unknown = parser.parse_known_args(argv)
+
+    # Only build the XML runner when it will actually be used.  Previously it
+    # was constructed unconditionally, so a missing RIFT_MODULE_TEST env var
+    # raised KeyError even when --no-runner was requested.
+    runner = None
+    if not args.no_runner:
+        runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwmonitor/CMakeLists.txt b/rwlaunchpad/plugins/rwmonitor/CMakeLists.txt
new file mode 100644
index 0000000..6bc0195
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonitor/CMakeLists.txt
@@ -0,0 +1,39 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Creation Date: 2015/10/30
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwmonitor)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+# NOTE: PKG_LONG_NAME is expected to be set by a parent CMakeLists
+# (e.g. rwlaunchpad/CMakeLists.txt) -- this file does not define it.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/core.py
+    rift/tasklets/${TASKLET_NAME}/tasklet.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/rwlaunchpad/plugins/rwmonitor/Makefile b/rwlaunchpad/plugins/rwmonitor/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonitor/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upwards(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., etc.,
+# until the file is found or the root directory is reached.
+# Note: the while loop runs in a single shell, so the `cd ..` persists
+# between iterations; `word 1` keeps only the nearest match.
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py
new file mode 100644
index 0000000..47bbfc8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py
@@ -0,0 +1 @@
+from .tasklet import MonitorTasklet
diff --git a/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py
new file mode 100644
index 0000000..b97b2f5
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py
@@ -0,0 +1,880 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import abc
+import asyncio
+import collections
+import concurrent.futures
+import importlib
+import time
+
+import gi
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwMon', '1.0')
+from gi.repository import (
+        RwTypes,
+        RwVnfrYang,
+        )
+
+import rw_peas
+
+
+class VdurMissingVimIdError(Exception):
+    """Raised when a VDUR is registered without an associated VIM ID."""
+    def __init__(self, vdur_id):
+        # Fixed message grammar ("is has no" -> "has no").
+        super().__init__("VDUR:{} has no VIM ID".format(vdur_id))
+
+
+class VdurAlreadyRegisteredError(Exception):
+    """Raised when a VDUR ID is registered more than once."""
+    def __init__(self, vdur_id):
+        super().__init__("VDUR:{} is already registered".format(vdur_id))
+
+
+class AccountInUseError(Exception):
+    """Error for cloud accounts that are currently in use (raise sites not
+    visible in this module chunk)."""
+    pass
+
+
+class UnknownAccountError(Exception):
+    """Error for references to a cloud account that is not known (raise sites
+    not visible in this module chunk)."""
+    pass
+
+
+class AccountAlreadyRegisteredError(Exception):
+    """Raised when a cloud account name is registered a second time."""
+    def __init__(self, account_name):
+        msg = "'{}' already registered".format(account_name)
+        # Bug fix: previously passed account_name to super().__init__, so the
+        # formatted msg was computed but never used and str(exc) lost the
+        # "already registered" context.
+        super().__init__(msg)
+
+
+class PluginUnavailableError(Exception):
+    """Raised when a monitoring plugin cannot be created or is unavailable on
+    the platform for a cloud account."""
+    pass
+
+
+class PluginNotSupportedError(PluginUnavailableError):
+    """Raised when a requested plugin name has no registered factory."""
+    pass
+
+
+class AlarmCreateError(Exception):
+    """Raised when an alarm could not be created."""
+    def __init__(self):
+        super().__init__("failed to create alarm")
+
+
+class AlarmDestroyError(Exception):
+    """Raised when an alarm could not be destroyed."""
+    def __init__(self):
+        super().__init__("failed to destroy alarm")
+
+
+class PluginFactory(object, metaclass=abc.ABCMeta):
+    """Abstract base class for NFVI monitoring plugin factories.
+
+    Subclasses define a PLUGIN_NAME class attribute and, optionally, a
+    FALLBACKS iterable of factory names to try when this plugin is
+    unavailable.
+
+    Bug fix: the original used `__metaclass__ = abc.ABCMeta`, which is
+    Python-2 syntax; under Python 3 it is just an ordinary class attribute,
+    so @abc.abstractmethod was never enforced.  Python 3 requires the
+    metaclass keyword in the class header.
+    """
+
+    @abc.abstractmethod
+    def create(self, cloud_account, plugin_name):
+        """Create and return a plugin instance for the given cloud account.
+
+        NOTE(review): concrete factories below implement create(cloud_account)
+        without plugin_name; ABCMeta does not check signatures, but the
+        mismatch is worth reconciling.
+        """
+        pass
+
+    @property
+    def name(self):
+        """The plugin name this factory produces (class attr PLUGIN_NAME)."""
+        return self.__class__.PLUGIN_NAME
+
+    @property
+    def fallbacks(self):
+        """List of fallback plugin names; empty when FALLBACKS is undefined."""
+        return list(getattr(self.__class__, "FALLBACKS", ()))
+
+
+class MonascaPluginFactory(PluginFactory):
+    """Factory for the (not yet supported) monasca plugin.
+
+    create() always fails, which causes the plugin manager to fall back to
+    ceilometer via FALLBACKS.
+    """
+    PLUGIN_NAME = "monasca"
+    FALLBACKS = ["ceilometer",]
+
+    def create(self, cloud_account):
+        # Monasca is not implemented; force fallback to ceilometer.
+        raise PluginUnavailableError()
+
+
+class CeilometerPluginFactory(PluginFactory):
+    """Factory for the ceilometer monitoring plugin (peas plugin
+    rwmon_ceilometer), falling back to the 'unavailable' stub."""
+    PLUGIN_NAME = "ceilometer"
+    FALLBACKS = ["unavailable",]
+
+    def create(self, cloud_account):
+        plugin = rw_peas.PeasPlugin("rwmon_ceilometer", 'RwMon-1.0')
+        impl = plugin.get_interface("Monitoring")
+
+        # Check that the plugin is available on the platform associated with
+        # the cloud account
+        _, available = impl.nfvi_metrics_available(cloud_account)
+        if not available:
+            raise PluginUnavailableError()
+
+        return impl
+
+
+class UnavailablePluginFactory(PluginFactory):
+    """Terminal fallback factory: always yields a stub plugin that reports
+    NFVI metrics as unavailable."""
+    PLUGIN_NAME = "unavailable"
+
+    class UnavailablePlugin(object):
+        # Mirrors the plugin interface's availability query: (status, bool).
+        def nfvi_metrics_available(self, cloud_account):
+            return None, False
+
+    def create(self, cloud_account):
+        return UnavailablePluginFactory.UnavailablePlugin()
+
+
+class MockPluginFactory(PluginFactory):
+    """Factory for the mock monitoring plugin (peas plugin rwmon_mock), used
+    for testing; structurally identical to CeilometerPluginFactory."""
+    PLUGIN_NAME = "mock"
+    FALLBACKS = ["unavailable",]
+
+    def create(self, cloud_account):
+        plugin = rw_peas.PeasPlugin("rwmon_mock", 'RwMon-1.0')
+        impl = plugin.get_interface("Monitoring")
+
+        # Check that the plugin is available on the platform associated with
+        # the cloud account
+        _, available = impl.nfvi_metrics_available(cloud_account)
+        if not available:
+            raise PluginUnavailableError()
+
+        return impl
+
+
+class NfviMetricsPluginManager(object):
+    """Maps cloud account names to monitoring plugin instances, creating them
+    via registered factories with fallback resolution."""
+
+    def __init__(self, log):
+        # account name -> plugin instance
+        self._plugins = dict()
+        self._log = log
+        # plugin name -> PluginFactory
+        self._factories = dict()
+
+        self.register_plugin_factory(MockPluginFactory())
+        self.register_plugin_factory(CeilometerPluginFactory())
+        self.register_plugin_factory(MonascaPluginFactory())
+        self.register_plugin_factory(UnavailablePluginFactory())
+
+    @property
+    def log(self):
+        """The logger used by this manager."""
+        return self._log
+
+    def register_plugin_factory(self, factory):
+        """Register a PluginFactory under its plugin name."""
+        self._factories[factory.name] = factory
+
+    def plugin(self, account_name):
+        """Return the plugin registered for the account (KeyError if none)."""
+        return self._plugins[account_name]
+
+    def register(self, cloud_account, plugin_name):
+        """Create and register a plugin for the account, trying fallbacks.
+
+        Raises:
+            AccountAlreadyRegisteredError - account name already registered
+            PluginNotSupportedError       - no factory for plugin_name
+            PluginUnavailableError        - no factory in the fallback chain
+                                            could produce a plugin
+        """
+        # Check to see if the cloud account has already been registered
+        if cloud_account.name in self._plugins:
+            raise AccountAlreadyRegisteredError(cloud_account.name)
+
+        if plugin_name not in self._factories:
+            raise PluginNotSupportedError(plugin_name)
+
+        # Create a plugin from one of the factories
+        fallbacks = [plugin_name,]
+
+        while fallbacks:
+            name = fallbacks.pop(0)
+            try:
+                factory = self._factories[name]
+                plugin = factory.create(cloud_account)
+                self._plugins[cloud_account.name] = plugin
+                return
+
+            except PluginUnavailableError as e:
+                self.log.warning("plugin for {} unavailable".format(name))
+                # NOTE(review): a fallback name with no registered factory
+                # would raise KeyError here rather than PluginUnavailableError
+                # -- confirm all FALLBACKS entries are always registered.
+                fallbacks.extend(factory.fallbacks)
+
+        raise PluginUnavailableError()
+
+    def unregister(self, account_name):
+        """Remove the plugin for the account; no-op if not registered."""
+        if account_name in self._plugins:
+            del self._plugins[account_name]
+
+
+class NfviMetrics(object):
+    """
+    The NfviMetrics class contains the logic to retrieve NFVI metrics for a
+    particular VDUR. Of particular importance is that this object caches the
+    metrics until the data become stale so that it does not create excessive
+    load upon the underlying data-source.
+    """
+
+    # The sample interval defines the maximum time (secs) that metrics will be
+    # cached for. This duration should coincide with the sampling interval used
+    # by the underlying data-source to capture metrics.
+    SAMPLE_INTERVAL = 10
+
+    # The maximum time (secs) an instance will wait for a request to the data
+    # source to be completed
+    TIMEOUT = 2
+
+    def __init__(self, log, loop, account, plugin, vdur):
+        """Creates an instance of NfviMetrics
+
+        Arguments:
+            log     - a logger
+            loop    - an event loop
+            account - a CloudAccount instance
+            plugin  - an NFVI plugin
+            vdur    - a VDUR instance
+
+        """
+        self._log = log
+        self._loop = loop
+        self._account = account
+        self._plugin = plugin
+        # Timestamp of the last completed update; 0 forces the first
+        # retrieve() to trigger an update.
+        self._timestamp = 0
+        self._metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+        self._vdur = vdur
+        self._vim_id = vdur.vim_id
+        # Task handle for the in-flight update, or None if never started.
+        self._updating = None
+
+    @property
+    def log(self):
+        """The logger used by NfviMetrics"""
+        return self._log
+
+    @property
+    def loop(self):
+        """The current asyncio loop"""
+        return self._loop
+
+    @property
+    def vdur(self):
+        """The VDUR that these metrics are associated with"""
+        return self._vdur
+
+    def retrieve(self):
+        """Return the NFVI metrics for this VDUR
+
+        This function will immediately return the current, known NFVI metrics
+        for the associated VDUR. It will also, if the data are stale, schedule
+        a call to the data-source to retrieve new data.
+
+        """
+        if self.should_update():
+            self._updating = self.loop.create_task(self.update())
+
+        # Note: this may return stale data while an update is in flight.
+        return self._metrics
+
+    def should_update(self):
+        """Return a boolean indicating whether an update should be performed"""
+        running = self._updating is not None and not self._updating.done()
+        overdue = time.time() > self._timestamp + NfviMetrics.SAMPLE_INTERVAL
+
+        return overdue and not running
+
+    @asyncio.coroutine
+    def update(self):
+        """Update the NFVI metrics for the associated VDUR
+
+        This coroutine will request new metrics from the data-source and update
+        the current metrics.
+
+        """
+        try:
+            try:
+                # Make the request to the plugin in a separate thread and do
+                # not exceed the timeout
+                _, metrics = yield from asyncio.wait_for(
+                        self.loop.run_in_executor(
+                            None,
+                            self._plugin.nfvi_metrics,
+                            self._account,
+                            self._vim_id,
+                            ),
+                        timeout=NfviMetrics.TIMEOUT,
+                        loop=self.loop,
+                        )
+
+            except asyncio.TimeoutError:
+                msg = "timeout on request for nfvi metrics (vim-id = {})"
+                self.log.warning(msg.format(self._vim_id))
+                return
+
+            except Exception as e:
+                self.log.exception(e)
+                return
+
+            try:
+                # Create uninitialized metric structure
+                vdu_metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+
+                # VCPU
+                vdu_metrics.vcpu.total = self.vdur.vm_flavor.vcpu_count
+                vdu_metrics.vcpu.utilization = metrics.vcpu.utilization
+
+                # Memory (in bytes)
+                vdu_metrics.memory.used = metrics.memory.used
+                vdu_metrics.memory.total = self.vdur.vm_flavor.memory_mb
+                vdu_metrics.memory.utilization = 100 * vdu_metrics.memory.used / vdu_metrics.memory.total
+
+                # Storage
+                vdu_metrics.storage.used = metrics.storage.used
+                vdu_metrics.storage.total = 1e9 * self.vdur.vm_flavor.storage_gb
+                # NOTE(review): a flavor with storage_gb == 0 would make this
+                # divide by zero; the surrounding except swallows it, leaving
+                # metrics un-updated -- confirm that is the intended behavior.
+                vdu_metrics.storage.utilization = 100 * vdu_metrics.storage.used / vdu_metrics.storage.total
+
+                # Network (incoming)
+                vdu_metrics.network.incoming.packets = metrics.network.incoming.packets
+                vdu_metrics.network.incoming.packet_rate = metrics.network.incoming.packet_rate
+                vdu_metrics.network.incoming.bytes = metrics.network.incoming.bytes
+                vdu_metrics.network.incoming.byte_rate = metrics.network.incoming.byte_rate
+
+                # Network (outgoing)
+                vdu_metrics.network.outgoing.packets = metrics.network.outgoing.packets
+                vdu_metrics.network.outgoing.packet_rate = metrics.network.outgoing.packet_rate
+                vdu_metrics.network.outgoing.bytes = metrics.network.outgoing.bytes
+                vdu_metrics.network.outgoing.byte_rate = metrics.network.outgoing.byte_rate
+
+                # External ports
+                vdu_metrics.external_ports.total = len(self.vdur.external_interface)
+
+                # Internal ports
+                vdu_metrics.internal_ports.total = len(self.vdur.internal_interface)
+
+                self._metrics = vdu_metrics
+
+            except Exception as e:
+                self.log.exception(e)
+
+        finally:
+            # Regardless of the result of the query, we want to make sure that
+            # we do not poll the data source until another sample duration has
+            # passed.
+            self._timestamp = time.time()
+
+
+class NfviMetricsCache(object):
+    """Caches NfviMetrics per VIM ID and maintains the bidirectional mapping
+    between VDUR IDs and VIM IDs."""
+
+    def __init__(self, log, loop, plugin_manager):
+        self._log = log
+        self._loop = loop
+        self._plugin_manager = plugin_manager
+        # vim_id -> NfviMetrics
+        self._nfvi_metrics = dict()
+
+        # Bidirectional vim_id <-> vdur_id lookup tables.
+        self._vim_to_vdur = dict()
+        self._vdur_to_vim = dict()
+
+    def create_entry(self, account, vdur):
+        """Create a metrics entry and the ID mappings for a VDUR."""
+        plugin = self._plugin_manager.plugin(account.name)
+        metrics = NfviMetrics(self._log, self._loop, account, plugin, vdur)
+        self._nfvi_metrics[vdur.vim_id] = metrics
+
+        self._vim_to_vdur[vdur.vim_id] = vdur.id
+        self._vdur_to_vim[vdur.id] = vdur.vim_id
+
+    def destroy_entry(self, vdur_id):
+        """Remove the metrics entry and ID mappings for a VDUR."""
+        vim_id = self._vdur_to_vim[vdur_id]
+
+        del self._nfvi_metrics[vim_id]
+        del self._vdur_to_vim[vdur_id]
+        del self._vim_to_vdur[vim_id]
+
+    def retrieve(self, vim_id):
+        """Return (possibly cached) metrics for the given VIM ID."""
+        return self._nfvi_metrics[vim_id].retrieve()
+
+    def to_vim_id(self, vdur_id):
+        """Map a VDUR ID to its VIM ID (KeyError if unknown)."""
+        return self._vdur_to_vim[vdur_id]
+
+    def to_vdur_id(self, vim_id):
+        """Map a VIM ID to its VDUR ID (KeyError if unknown)."""
+        return self._vim_to_vdur[vim_id]
+
+    def contains_vdur_id(self, vdur_id):
+        """True if the given VDUR ID has an entry."""
+        return vdur_id in self._vdur_to_vim
+
+    def contains_vim_id(self, vim_id):
+        """True if the given VIM ID has an entry."""
+        return vim_id in self._vim_to_vdur
+
+
+class NfviInterface(object):
+    """
+    The NfviInterface serves as an interface for communicating with the
+    underlying infrastructure, i.e. retrieving metrics for VDURs that have been
+    registered with it and managing alarms.
+
+    The NfviInterface should only need to be invoked using a cloud account and
+    optionally a VIM ID; It should not need to handle mapping from VDUR ID to
+    VIM ID.
+    """
+
+    def __init__(self, loop, log, plugin_manager, cache):
+        """Creates an NfviInterface instance
+
+        Arguments:
+            loop           - an event loop
+            log            - a logger
+            plugin_manager - an instance of NfviMetricsPluginManager
+            cache          - an instance of NfviMetricsCache
+
+        """
+        # NOTE(review): this executor is not referenced by any method visible
+        # in this chunk -- presumably used by alarm handling further down.
+        self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=16)
+        self._plugin_manager = plugin_manager
+        self._cache = cache
+        self._loop = loop
+        self._log = log
+
+    @property
+    def loop(self):
+        """The event loop used by this NfviInterface"""
+        return self._loop
+
+    @property
+    def log(self):
+        """The event log used by this NfviInterface"""
+        return self._log
+
+    @property
+    def metrics(self):
+        """The list of metrics contained in this NfviInterface"""
+        return list(self._cache._nfvi_metrics.values())
+
+    def nfvi_metrics_available(self, account):
+        plugin = self._plugin_manager.plugin(account.name)
+        _, available = plugin.nfvi_metrics_available(account)
+        return available
+
+    def retrieve(self, vdur_id):
+        """Returns the NFVI metrics for the specified VDUR
+
+        Note, a VDUR must be registered with a NfviInterface before
+        metrics can be retrieved for it.
+
+        Arguments:
+            vdur_id - the ID of the VDUR to whose metrics should be retrieve
+
+        Returns:
+            An NfviMetrics object for the specified VDUR
+
+        """
+        return self._cache.retrieve(self._cache.to_vim_id(vdur_id))
+
+    @asyncio.coroutine
+    def alarm_create(self, account, vim_id, alarm, timeout=5):
+        """Create a new alarm
+
+        Arguments:
+            account - a CloudAccount instance
+            vim_id  - the VM to associate with this alarm
+            alarm   - an alarm structure
+            timeout - the request timeout (sec)
+
+        Raises:
+            If the data source does not respond in a timely manner, an
+            asyncio.TimeoutError will be raised.
+
+        """
+        plugin = self._plugin_manager.plugin(account.name)
+        status = yield from asyncio.wait_for(
+                self.loop.run_in_executor(
+                    None,
+                    plugin.do_alarm_create,
+                    account,
+                    vim_id,
+                    alarm,
+                    ),
+                timeout=timeout,
+                loop=self.loop,
+                )
+
+        if status == RwTypes.RwStatus.FAILURE:
+            raise AlarmCreateError()
+
+    @asyncio.coroutine
+    def alarm_destroy(self, account, alarm_id, timeout=5):
+        """Destroy an existing alarm
+
+        Arguments:
+            account  - a CloudAccount instance
+            alarm_id - the identifier of the alarm to destroy
+            timeout  - the request timeout (sec)
+
+        Raises:
+            If the data source does not respond in a timely manner, an
+            asyncio.TimeoutError will be raised.
+
+        """
+        plugin = self._plugin_manager.plugin(account.name)
+        status = yield from asyncio.wait_for(
+                self.loop.run_in_executor(
+                    None,
+                    plugin.do_alarm_delete,
+                    account,
+                    alarm_id,
+                    ),
+                timeout=timeout,
+                loop=self.loop,
+                )
+
+        if status == RwTypes.RwStatus.FAILURE:
+            raise AlarmDestroyError()
+
+
+class InstanceConfiguration(object):
+    """
+    The InstanceConfiguration class represents configuration information that
+    affects the behavior of the monitor. Essentially this class should contain
+    no functional behavior but serve as a convenient way to share data amongst
+    the components of the monitoring system.
+    """
+
+    def __init__(self):
+        # Period (sec) between polls; set by MonitorTasklet at startup
+        self.polling_period = None
+        # presumably caps how often a data source may be queried — not
+        # assigned in this chunk; confirm against the rest of the tasklet
+        self.max_polling_frequency = None
+        # presumably the minimum time cached metrics stay valid — not
+        # assigned in this chunk; confirm against the rest of the tasklet
+        self.min_cache_lifetime = None
+        # Public IP of the launchpad, or None when not configured (used by
+        # Monitor.create_alarm to build webhook URLs)
+        self.public_ip = None
+
+
+class Monitor(object):
+    """
+    The Monitor class is intended to act as a unifying interface for the
+    different sub-systems that are used to monitor the NFVI.
+    """
+
+    def __init__(self, loop, log, config):
+        """Create a Monitor object
+
+        Arguments:
+            loop   - an event loop
+            log    - the logger used by this object
+            config - an instance of InstanceConfiguration
+
+        """
+        self._loop = loop
+        self._log = log
+
+        self._cloud_accounts = dict()
+        self._nfvi_plugins = NfviMetricsPluginManager(log)
+        self._cache = NfviMetricsCache(log, loop, self._nfvi_plugins)
+        self._nfvi_interface = NfviInterface(loop, log, self._nfvi_plugins, self._cache)
+        self._config = config
+        self._vnfrs = dict()
+        self._vnfr_to_vdurs = collections.defaultdict(set)
+        self._alarms = collections.defaultdict(list)
+
+    @property
+    def loop(self):
+        """The event loop used by this object"""
+        return self._loop
+
+    @property
+    def log(self):
+        """The event log used by this object"""
+        return self._log
+
+    @property
+    def cache(self):
+        """The NFVI metrics cache"""
+        return self._cache
+
+    @property
+    def metrics(self):
+        """The list of metrics contained in this Monitor"""
+        return self._nfvi_interface.metrics
+
+    def nfvi_metrics_available(self, account):
+        """Returns a boolean indicating whether NFVI metrics are available
+
+        Arguments:
+            account - the name of the cloud account to check
+
+        Returns:
+            a boolean indicating availability of NFVI metrics
+
+        """
+        if account not in self._cloud_accounts:
+            return False
+
+        cloud_account = self._cloud_accounts[account]
+        return self._nfvi_interface.nfvi_metrics_available(cloud_account)
+
+    def add_cloud_account(self, account):
+        """Add a cloud account to the monitor
+
+        Arguments:
+            account - a cloud account object
+
+        Raises:
+            If the cloud account has already been added to the monitor, an
+            AccountAlreadyRegisteredError is raised.
+
+        """
+        if account.name in self._cloud_accounts:
+            raise AccountAlreadyRegisteredError(account.name)
+
+        self._cloud_accounts[account.name] = account
+
+        if account.account_type == "openstack":
+            self.register_cloud_account(account, "monasca")
+        else:
+            self.register_cloud_account(account, "mock")
+
+    def remove_cloud_account(self, account_name):
+        """Remove a cloud account from the monitor
+
+        Arguments:
+            account_name - removes the cloud account that has this name
+
+        Raises:
+            If the specified cloud account cannot be found, an
+            UnknownAccountError is raised.
+
+        """
+        if account_name not in self._cloud_accounts:
+            raise UnknownAccountError()
+
+        # Make sure that there are no VNFRs associated with this account
+        for vnfr in self._vnfrs.values():
+            if vnfr.cloud_account == account_name:
+                raise AccountInUseError()
+
+        del self._cloud_accounts[account_name]
+        self._nfvi_plugins.unregister(account_name)
+
+    def get_cloud_account(self, account_name):
+        """Returns a cloud account by name
+
+        Arguments:
+            account_name - the name of the account to return
+
+        Raises:
+            An UnknownAccountError is raised if there is not account object
+            associated with the provided name
+
+        Returns:
+            A cloud account object
+
+        """
+        if account_name not in self._cloud_accounts:
+            raise UnknownAccountError()
+
+        return self._cloud_accounts[account_name]
+
+    def register_cloud_account(self, account, plugin_name):
+        """Register a cloud account with an NFVI plugin
+
+        Note that a cloud account can only be registered for one plugin at a
+        time.
+
+        Arguments:
+            account     - the cloud account to associate with the plugin
+            plugin_name - the name of the plugin to use
+
+        """
+        self._nfvi_plugins.register(account, plugin_name)
+
+    def add_vnfr(self, vnfr):
+        """Add a VNFR to the monitor
+
+        Arguments:
+            vnfr - a VNFR object
+
+        Raises:
+            An UnknownAccountError is raised if the account name contained in
+            the VNFR does not reference a cloud account that has been added to
+            the monitor.
+
+        """
+        if vnfr.cloud_account not in self._cloud_accounts:
+            raise UnknownAccountError()
+
+        account = self._cloud_accounts[vnfr.cloud_account]
+
+        for vdur in vnfr.vdur:
+            try:
+                self.add_vdur(account, vdur)
+                self._vnfr_to_vdurs[vnfr.id].add(vdur.id)
+            except (VdurMissingVimIdError, VdurAlreadyRegisteredError):
+                pass
+
+        self._vnfrs[vnfr.id] = vnfr
+
+    def update_vnfr(self, vnfr):
+        """Updates the VNFR information in the monitor
+
+        Arguments:
+            vnfr - a VNFR object
+
+        Raises:
+            An UnknownAccountError is raised if the account name contained in
+            the VNFR does not reference a cloud account that has been added to
+            the monitor.
+
+        """
+        if vnfr.cloud_account not in self._cloud_accounts:
+            raise UnknownAccountError()
+
+        account = self._cloud_accounts[vnfr.cloud_account]
+
+        for vdur in vnfr.vdur:
+            try:
+                self.add_vdur(account, vdur)
+                self._vnfr_to_vdurs[vnfr.id].add(vdur.id)
+            except (VdurMissingVimIdError, VdurAlreadyRegisteredError):
+                pass
+
+    def remove_vnfr(self, vnfr_id):
+        """Remove a VNFR from the monitor
+
+        Arguments:
+            vnfr_id - the ID of the VNFR to remove
+
+        """
+        vdur_ids = self._vnfr_to_vdurs[vnfr_id]
+
+        for vdur_id in vdur_ids:
+            self.remove_vdur(vdur_id)
+
+        del self._vnfrs[vnfr_id]
+        del self._vnfr_to_vdurs[vnfr_id]
+
+    def add_vdur(self, account, vdur):
+        """Adds a VDUR to the monitor
+
+        Adding a VDUR to the monitor will automatically create a NFVI metrics
+        object that is associated with the VDUR so that the monitor cane
+        provide the NFVI metrics associated with the VDUR.
+
+        Arguments:
+            account - the cloud account associated with the VNFR that contains
+                      the provided VDUR
+            vdur    - a VDUR object
+
+        Raises:
+            A VdurMissingVimIdError is raised if the provided VDUR does not
+            contain a VIM ID. A VdurAlreadyRegisteredError is raised if the ID
+            associated with the VDUR has already been registered.
+
+        """
+        if not vdur.vim_id:
+            raise VdurMissingVimIdError(vdur.id)
+
+        if self.is_registered_vdur(vdur.id):
+            raise VdurAlreadyRegisteredError(vdur.id)
+
+        self.cache.create_entry(account, vdur)
+
+    def remove_vdur(self, vdur_id):
+        """Removes a VDUR from the monitor
+
+        Arguments:
+            vdur_id - the ID of the VDUR to remove
+
+        """
+        self.cache.destroy_entry(vdur_id)
+
+        # Schedule any alarms associated with the VDUR for destruction
+        for account_name, alarm_id in self._alarms[vdur_id]:
+            self.loop.create_task(self.destroy_alarm(account_name, alarm_id))
+
+        del self._alarms[vdur_id]
+
+    def list_vdur(self, vnfr_id):
+        """Returns a list of VDURs
+
+        Arguments:
+            vnfr_id - the identifier of the VNFR contains the VDURs
+
+        Returns:
+            A list of VDURs
+
+        """
+        return self._vnfrs[vnfr_id].vdur
+
+    def is_registered_vnfr(self, vnfr_id):
+        """Returns True if the VNFR is registered with the monitor
+
+        Arguments:
+            vnfr_id - the ID of the VNFR to check
+
+        Returns:
+            True if the VNFR is registered and False otherwise.
+
+        """
+        return vnfr_id in self._vnfrs
+
+    def is_registered_vdur(self, vdur_id):
+        """Returns True if the VDUR is registered with the monitor
+
+        Arguments:
+            vnfr_id - the ID of the VDUR to check
+
+        Returns:
+            True if the VDUR is registered and False otherwise.
+
+        """
+        return self.cache.contains_vdur_id(vdur_id)
+
+    def retrieve_nfvi_metrics(self, vdur_id):
+        """Retrieves the NFVI metrics associated with a VDUR
+
+        Arguments:
+            vdur_id - the ID of the VDUR whose metrics are to be retrieved
+
+        Returns:
+            NFVI metrics for a VDUR
+
+        """
+        return self._nfvi_interface.retrieve(vdur_id)
+
+    @asyncio.coroutine
+    def create_alarm(self, account_name, vdur_id, alarm):
+        """Create a new alarm
+
+        This function create an alarm and augments the provided endpoints with
+        endpoints to the launchpad if the launchpad has a public IP. The added
+        endpoints are of the form,
+
+            http://{host}:4568/{platform}/{vdur_id}/{action}
+
+        where the 'action' is one of 'ok', 'alarm', or 'insufficient_data'. The
+        messages that are pushed to the launchpad are not defined by RIFT so
+        we need to know which platform an alarm is sent from in order to
+        properly parse it.
+
+
+        Arguments:
+            account_name - the name of the account to use to create the alarm
+            vdur_id      - the identifier of the VDUR to associated with the
+                           alarm. If the identifier is None, the alarm is not
+                           associated with a specific VDUR.
+            alarm        - the alarm data
+
+        """
+        account = self.get_cloud_account(account_name)
+        vim_id = self.cache.to_vim_id(vdur_id)
+
+        # If the launchpad has a public IP, augment the action webhooks to
+        # include the launchpad so that the alarms can be broadcast as event
+        # notifications.
+        if self._config.public_ip is not None:
+            url = "http://{host}:4568/{platform}/{vdur_id}".format(
+                    host=self._config.public_ip,
+                    platform=account.account_type,
+                    vdur_id=vudr_id,
+                    )
+            alarm.actions.ok.add().url = url + "/ok"
+            alarm.actions.alarm.add().url = url + "/alarm"
+            alarm.actions.alarm.add().url = url + "/insufficient_data"
+
+        yield from self._nfvi_interface.alarm_create(account, vim_id, alarm)
+
+        # Associate the VDUR ID with the alarm ID
+        self._alarms[vdur_id].append((account_name, alarm.alarm_id))
+
+    @asyncio.coroutine
+    def destroy_alarm(self, account_name, alarm_id):
+        """Destroy an existing alarm
+
+        Arugments:
+            account_name - the name of the account that owns the alert
+            alarm_id     - the identifier of the alarm to destroy
+
+        """
+        account = self.get_cloud_account(account_name)
+        yield from self._nfvi_interface.alarm_destroy(account, alarm_id)
diff --git a/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py
new file mode 100644
index 0000000..4ab351e
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py
@@ -0,0 +1,714 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+"""
+NFVI MONITORING
+==================================================
+
+Data Model
+--------------------------------------------------
+
+The monitoring tasklet consists of several types of data that are associated
+with one another. The highest level data are the cloud accounts. These objects
+contain authentication information that is used to retrieve metrics as well as
+the provider (and hence the available data source platforms).
+
+Each cloud account is associated with an NfviMetricsPlugin. This is a
+one-to-one relationship. The plugin is the interface to the data source that
+will actually provide the NFVI metrics.
+
+Each cloud account is also associated with several VNFRs. Each VNFR, in turn,
+contains several VDURs. The VDURs represent the level that the NFVI metrics are
+collected at. However, it is important that the relationships among all these
+different objects are carefully managed.
+
+
+        CloudAccount -------------- NfviMetricsPlugin
+            / \
+           /   \
+          / ... \
+         /       \
+       VNFR     VNFR
+                 /\
+                /  \
+               /    \
+              / .... \
+             /        \
+           VDUR      VDUR
+            |          |
+            |          |
+         Metrics     Metrics
+
+
+Monitoring Tasklet
+--------------------------------------------------
+
+The monitoring tasklet (the MonitorTasklet class) is primarily responsible for
+the communication between DTS and the application (the Monitor class), which
+provides the logic for managing and interacting with the data model (see
+above).
+
+"""
+
+import asyncio
+import concurrent.futures
+import time
+
+import tornado.web
+import tornado.httpserver
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwLog', '1.0')
+gi.require_version('RwMonitorYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwLog as rwlog,
+    RwMonitorYang as rwmonitor,
+    RwLaunchpadYang,
+    RwVnfrYang,
+    VnfrYang,
+)
+
+import rift.tasklets
+import rift.mano.cloud
+
+from . import core
+
+
+class DtsHandler(object):
+    """Base class for DTS handlers owned by the monitoring tasklet.
+
+    Exposes the tasklet's logging, DTS, and event loop handles through
+    read-only properties so derived handlers do not need to store them.
+    """
+
+    def __init__(self, tasklet):
+        # DTS registration handle; assigned by derived classes in register()
+        self.reg = None
+        self.tasklet = tasklet
+
+    @property
+    def log(self):
+        """The logger of the owning tasklet"""
+        return self.tasklet.log
+
+    @property
+    def log_hdl(self):
+        """The log handle of the owning tasklet"""
+        return self.tasklet.log_hdl
+
+    @property
+    def dts(self):
+        """The DTS instance of the owning tasklet"""
+        return self.tasklet.dts
+
+    @property
+    def loop(self):
+        """The event loop of the owning tasklet"""
+        return self.tasklet.loop
+
+    @property
+    def classname(self):
+        """The concrete class name of this handler (used in log messages)"""
+        return self.__class__.__name__
+
+class VnfrCatalogSubscriber(DtsHandler):
+    """Subscribes to VNFR catalog changes and forwards them to the tasklet.
+
+    CREATE, UPDATE, and DELETE events on catalog entries are dispatched to
+    the tasklet's on_vnfr_create/on_vnfr_update/on_vnfr_delete callbacks.
+    """
+
+    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+
+    @asyncio.coroutine
+    def register(self):
+        """Register this subscriber with DTS"""
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            # Dispatch the query action to the matching tasklet callback;
+            # exceptions are logged but never propagated to DTS.
+            try:
+                if msg is None:
+                    return
+
+                if action == rwdts.QueryAction.CREATE:
+                    self.tasklet.on_vnfr_create(msg)
+
+                elif action == rwdts.QueryAction.UPDATE:
+                    self.tasklet.on_vnfr_update(msg)
+
+                elif action == rwdts.QueryAction.DELETE:
+                    self.tasklet.on_vnfr_delete(msg)
+
+            except Exception as e:
+                self.log.exception(e)
+
+            finally:
+                # Always acknowledge the transaction, even on error
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        handler = rift.tasklets.DTS.RegistrationHandler(
+                on_prepare=on_prepare,
+                )
+
+        with self.dts.group_create() as group:
+            group.register(
+                    xpath=VnfrCatalogSubscriber.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER,
+                    handler=handler,
+                    )
+
+
+class NsInstanceConfigSubscriber(DtsHandler):
+    """Subscribes to NS instance configuration changes.
+
+    Each applied config element is forwarded to the tasklet's
+    on_ns_instance_config_update callback.
+    """
+
+    XPATH = "C,/nsr:ns-instance-config"
+
+    @asyncio.coroutine
+    def register(self):
+        """Register this subscriber with DTS"""
+        def on_apply(dts, acg, xact, action, _):
+            # Forward every config element in the transaction to the tasklet
+            xact_config = list(self.reg.get_xact_elements(xact))
+            for config in xact_config:
+                self.tasklet.on_ns_instance_config_update(config)
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=on_apply,
+                        )
+
+        with self.dts.appconf_group_create(acg_handler) as acg:
+            self.reg = acg.register(
+                    xpath=NsInstanceConfigSubscriber.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER,
+                    )
+
+
+class CloudAccountDtsHandler(DtsHandler):
+    """Wires cloud account configuration changes to the tasklet callbacks."""
+
+    def __init__(self, tasklet):
+        super().__init__(tasklet)
+        # Created lazily in register()
+        self._cloud_cfg_subscriber = None
+
+    def register(self):
+        """Subscribe to cloud account add/delete configuration events"""
+        self.log.debug("creating cloud account config handler")
+        self._cloud_cfg_subscriber = rift.mano.cloud.CloudAccountConfigSubscriber(
+               self.dts, self.log, self.log_hdl,
+               rift.mano.cloud.CloudAccountConfigCallbacks(
+                   on_add_apply=self.tasklet.on_cloud_account_create,
+                   on_delete_apply=self.tasklet.on_cloud_account_delete,
+               )
+           )
+        self._cloud_cfg_subscriber.register()
+
+
+class VdurNfviMetricsPublisher(DtsHandler):
+    """
+    A VdurNfviMetricsPublisher is responsible for publishing the NFVI metrics
+    from a single VDU.
+    """
+
+    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id='{}']/vnfr:vdur[vnfr:id='{}']/rw-vnfr:nfvi-metrics"
+
+    # This timeout defines the length of time the publisher will wait for a
+    # request to a data source to complete. If the request cannot be completed
+    # before timing out, the current data will be published instead.
+    TIMEOUT = 2.0
+
+    def __init__(self, tasklet, vnfr, vdur):
+        """Create an instance of VdurNvfiPublisher
+
+        Arguments:
+            tasklet - the tasklet
+            vnfr    - the VNFR that contains the VDUR
+            vdur    - the VDUR of the VDU whose metrics are published
+
+        """
+        super().__init__(tasklet)
+        self._vnfr = vnfr
+        self._vdur = vdur
+
+        # DTS registration handle; set in register(), cleared in deregister()
+        self._handle = None
+        self._xpath = VdurNfviMetricsPublisher.XPATH.format(vnfr.id, vdur.id)
+
+        # Set once deregister() runs, so in-flight reads stop publishing
+        self._deregistered = asyncio.Event(loop=self.loop)
+
+    @property
+    def vnfr(self):
+        """The VNFR associated with this publisher"""
+        return self._vnfr
+
+    @property
+    def vdur(self):
+        """The VDUR associated with this publisher"""
+        return self._vdur
+
+    @property
+    def vim_id(self):
+        """The VIM ID of the VDUR associated with this publisher"""
+        return self._vdur.vim_id
+
+    @property
+    def xpath(self):
+        """The XPATH that the metrics are published on"""
+        return self._xpath
+
+    @asyncio.coroutine
+    def dts_on_prepare(self, xact_info, action, ks_path, msg):
+        """Handles the DTS on_prepare callback"""
+        self.log.debug("{}:dts_on_prepare".format(self.classname))
+
+        if action == rwdts.QueryAction.READ:
+            # If the publisher has been deregistered, the xpath element has
+            # been deleted. So we do not want to publish the metrics and
+            # re-created the element.
+            if not self._deregistered.is_set():
+                metrics = self.tasklet.on_retrieve_nfvi_metrics(self.vdur.id)
+                # Respond with MORE so the final ACK below completes the
+                # transaction after the data is attached.
+                xact_info.respond_xpath(
+                        rwdts.XactRspCode.MORE,
+                        self.xpath,
+                        metrics,
+                        )
+
+        xact_info.respond_xpath(rwdts.XactRspCode.ACK, self.xpath)
+
+    @asyncio.coroutine
+    def register(self):
+        """Register the publisher with DTS"""
+        self._handle = yield from self.dts.register(
+                xpath=self.xpath,
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=self.dts_on_prepare,
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    def deregister(self):
+        """Deregister the publisher from DTS"""
+        # Mark the publisher for deregistration. This prevents the publisher
+        # from creating an element after it has been deleted.
+        self._deregistered.set()
+
+        # Now that we are done with the registration handle, delete the element
+        # and tell DTS to deregister it
+        self._handle.delete_element(self.xpath)
+        self._handle.deregister()
+        self._handle = None
+
+
+class LaunchpadConfigDtsSubscriber(DtsHandler):
+    """
+    This class subscribes to the launchpad configuration and alerts the tasklet
+    to any relevant changes.
+    """
+
+    @asyncio.coroutine
+    def register(self):
+        """Register this subscriber with DTS"""
+        @asyncio.coroutine
+        def apply_config(dts, acg, xact, action, _):
+            if xact.xact is None:
+                # When RIFT first comes up, an INSTALL is called with the current config
+                # Since confd doesn't actally persist data this never has any data so
+                # skip this for now.
+                self.log.debug("No xact handle. Skipping apply config")
+                return
+
+            try:
+                # Only the first config element is inspected — presumably a
+                # single launchpad-config instance exists; confirm.
+                cfg = list(self.reg.get_xact_elements(xact))[0]
+                if cfg.public_ip != self.tasklet.public_ip:
+                    yield from self.tasklet.on_public_ip(cfg.public_ip)
+
+            except Exception as e:
+                # NOTE(review): errors are logged and swallowed, so a failed
+                # apply leaves the previous public IP in effect
+                self.log.exception(e)
+
+        try:
+            acg_handler = rift.tasklets.AppConfGroup.Handler(
+                            on_apply=apply_config,
+                            )
+
+            with self.dts.appconf_group_create(acg_handler) as acg:
+                self.reg = acg.register(
+                        xpath="C,/rw-launchpad:launchpad-config",
+                        flags=rwdts.Flag.SUBSCRIBER,
+                        )
+
+        except Exception as e:
+            self.log.exception(e)
+
+
+class CreateAlarmRPC(DtsHandler):
+    """
+    This class is used to listen for RPC calls to /vnfr:create-alarm, and pass
+    them on to the tasklet.
+    """
+
+    def __init__(self, tasklet):
+        super().__init__(tasklet)
+        # DTS registration handle; set in register(), cleared in deregister()
+        self._handle = None
+
+    @asyncio.coroutine
+    def register(self):
+        """Register this handler with DTS"""
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            # Delegate alarm creation to the tasklet and respond with the
+            # resulting alarm ID; NACK on any failure.
+            try:
+                response = VnfrYang.YangOutput_Vnfr_CreateAlarm()
+                response.alarm_id = yield from self.tasklet.on_create_alarm(
+                        msg.cloud_account,
+                        msg.vdur_id,
+                        msg.alarm,
+                        )
+
+                xact_info.respond_xpath(
+                        rwdts.XactRspCode.ACK,
+                        "O,/vnfr:create-alarm",
+                        response,
+                        )
+
+            except Exception as e:
+                self.log.exception(e)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+
+        self._handle = yield from self.dts.register(
+                xpath="I,/vnfr:create-alarm",
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    def deregister(self):
+        """Deregister this handler"""
+        self._handle.deregister()
+        self._handle = None
+
+
+class DestroyAlarmRPC(DtsHandler):
+    """
+    This class is used to listen for RPC calls to /vnfr:destroy-alarm, and pass
+    them on to the tasklet.
+    """
+
+    def __init__(self, tasklet):
+        super().__init__(tasklet)
+        # DTS registration handle; set in register(), cleared in deregister()
+        self._handle = None
+
+    @asyncio.coroutine
+    def register(self):
+        """Register this handler with DTS"""
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            # Delegate alarm destruction to the tasklet; NACK on any failure.
+            try:
+                yield from self.tasklet.on_destroy_alarm(
+                        msg.cloud_account,
+                        msg.alarm_id,
+                        )
+
+                xact_info.respond_xpath(
+                        rwdts.XactRspCode.ACK,
+                        "O,/vnfr:destroy-alarm"
+                        )
+
+            except Exception as e:
+                self.log.exception(e)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+
+        self._handle = yield from self.dts.register(
+                xpath="I,/vnfr:destroy-alarm",
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    def deregister(self):
+        """Deregister this handler"""
+        self._handle.deregister()
+        self._handle = None
+
+
+class Delegate(object):
+    """
+    This class is used to delegate calls to collections of listener objects.
+    The listeners are expected to conform to the required function arguments,
+    but this is not enforced by the Delegate class itself.
+    """
+
+    def __init__(self):
+        self._listeners = list()
+
+    def __call__(self, *args, **kwargs):
+        """Delegate the call to the registered listeners"""
+        for listener in self._listeners:
+            listener(*args, **kwargs)
+
+    def register(self, listener):
+        """Register a listener
+
+        Arguments:
+            listener - an object that function calls will be delegated to
+
+        """
+        self._listeners.append(listener)
+
+
+class WebhookHandler(tornado.web.RequestHandler):
+    """Receives alarm webhook callbacks posted to the launchpad.
+
+    NOTE(review): post() is currently a no-op — incoming notifications are
+    accepted but not processed here. Confirm whether handling is implemented
+    elsewhere or is still TODO.
+    """
+
+    @property
+    def log(self):
+        """The logger of the owning tasklet"""
+        return self.application.tasklet.log
+
+    def options(self, *args, **kargs):
+        # CORS pre-flight: respond with only the default headers
+        pass
+
+    def set_default_headers(self):
+        # Allow cross-origin POSTs from any host
+        self.set_header('Access-Control-Allow-Origin', '*')
+        self.set_header('Access-Control-Allow-Headers', 'Content-Type, Cache-Control, Accept, X-Requested-With, Authorization')
+        self.set_header('Access-Control-Allow-Methods', 'POST')
+
+    def post(self, action, vim_id):
+        # No-op: see class docstring
+        pass
+
+
+class WebhookApplication(tornado.web.Application):
+    """Tornado application exposing the alarm webhook endpoints.
+
+    NOTE(review): this route captures two path segments, while
+    Monitor.create_alarm registers three-segment action URLs of the form
+    /{platform}/{vdur_id}/{action} — confirm whether the trailing action
+    segment is intentionally unmatched.
+    """
+
+    DEFAULT_WEBHOOK_PORT = 4568
+
+    def __init__(self, tasklet):
+        self.tasklet = tasklet
+
+        super().__init__([
+                (r"/([^/]+)/([^/]+)/?", WebhookHandler),
+                ])
+
+
+class MonitorTasklet(rift.tasklets.Tasklet):
+    """
+    The MonitorTasklet provides a interface for DTS to interact with an
+    instance of the Monitor class. This allows the Monitor class to remain
+    independent of DTS.
+    """
+
+    DEFAULT_POLLING_PERIOD = 1.0
+
+    def __init__(self, *args, **kwargs):
+        """Create the tasklet, its DTS handlers, and the Monitor instance.
+
+        NOTE(review): any exception during construction is logged and
+        swallowed, leaving the tasklet partially initialized — confirm this
+        is intended.
+        """
+        try:
+            super().__init__(*args, **kwargs)
+            self.rwlog.set_category("rw-monitor-log")
+
+            # DTS subscribers that feed the Monitor with state changes
+            self.vnfr_subscriber = VnfrCatalogSubscriber(self)
+            self.cloud_cfg_subscriber = CloudAccountDtsHandler(self)
+            self.ns_instance_config_subscriber = NsInstanceConfigSubscriber(self)
+            self.launchpad_cfg_subscriber = LaunchpadConfigDtsSubscriber(self)
+
+            self.config = core.InstanceConfiguration()
+            self.config.polling_period = MonitorTasklet.DEFAULT_POLLING_PERIOD
+
+            self.monitor = core.Monitor(self.loop, self.log, self.config)
+            # Maps VDUR ID to its VdurNfviMetricsPublisher
+            self.vdur_handlers = dict()
+
+            # Webhook server; created later in init()
+            self.webhooks = None
+            self.create_alarm_rpc = CreateAlarmRPC(self)
+            self.destroy_alarm_rpc = DestroyAlarmRPC(self)
+
+
+        except Exception as e:
+            self.log.exception(e)
+
+    @property
+    def polling_period(self):
+        return self.config.polling_period
+
+    @property
+    def public_ip(self):
+        """The public IP of the launchpad"""
+        return self.config.public_ip
+
+    def start(self):
+        super().start()
+        self.log.info("Starting MonitoringTasklet")
+
+        self.log.debug("Registering with dts")
+        self.dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                RwLaunchpadYang.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+    def stop(self):
+      try:
+          self.dts.deinit()
+      except Exception as e:
+          self.log.exception(e)
+
+    @asyncio.coroutine
+    def init(self):
+        self.log.debug("creating cloud account handler")
+        self.cloud_cfg_subscriber.register()
+
+        self.log.debug("creating launchpad config subscriber")
+        yield from self.launchpad_cfg_subscriber.register()
+
+        self.log.debug("creating NS instance config subscriber")
+        yield from  self.ns_instance_config_subscriber.register()
+
+        self.log.debug("creating vnfr subscriber")
+        yield from self.vnfr_subscriber.register()
+
+        self.log.debug("creating create-alarm rpc handler")
+        yield from self.create_alarm_rpc.register()
+
+        self.log.debug("creating destroy-alarm rpc handler")
+        yield from self.destroy_alarm_rpc.register()
+
+        self.log.debug("creating webhook server")
+        loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+        self.webhooks = WebhookApplication(self)
+        self.server = tornado.httpserver.HTTPServer(
+            self.webhooks,
+            io_loop=loop,
+        )
+
+    @asyncio.coroutine
+    def on_public_ip(self, ip):
+        """Store the public IP of the launchpad
+
+        Arguments:
+            ip - a string containing the public IP address of the launchpad
+
+        """
+        self.config.public_ip = ip
+
+    def on_ns_instance_config_update(self, config):
+        """Update configuration information
+
+        Arguments:
+            config - an NsInstanceConfig object
+
+        """
+        if config.nfvi_polling_period is not None:
+            self.config.polling_period = config.nfvi_polling_period
+
+    def on_cloud_account_create(self, account):
+        self.monitor.add_cloud_account(account.cal_account_msg)
+
+    def on_cloud_account_delete(self, account_name):
+        self.monitor.remove_cloud_account(account_name)
+
+    @asyncio.coroutine
+    def run(self):
+        self.webhooks.listen(WebhookApplication.DEFAULT_WEBHOOK_PORT)
+
+    def on_instance_started(self):
+        self.log.debug("Got instance started callback")
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Handle DTS state change
+
+        Take action according to current DTS state to transition application
+        into the corresponding application state
+
+        Arguments
+            state - current dts state
+
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self.dts.handle.set_state(next_state)
+
+    def on_vnfr_create(self, vnfr):
+        if not self.monitor.nfvi_metrics_available(vnfr.cloud_account):
+            msg = "NFVI metrics unavailable for {}"
+            self.log.warning(msg.format(vnfr.cloud_account))
+            return
+
+        self.monitor.add_vnfr(vnfr)
+
+        # Create NFVI handlers for VDURs
+        for vdur in vnfr.vdur:
+            if vdur.vim_id is not None:
+                coro = self.register_vdur_nfvi_handler(vnfr, vdur)
+                self.loop.create_task(coro)
+
+    def on_vnfr_update(self, vnfr):
+        if not self.monitor.nfvi_metrics_available(vnfr.cloud_account):
+            msg = "NFVI metrics unavailable for {}"
+            self.log.warning(msg.format(vnfr.cloud_account))
+            return
+
+        self.monitor.update_vnfr(vnfr)
+
+        # TODO handle the removal of vdurs
+        for vdur in vnfr.vdur:
+            if vdur.vim_id is not None:
+                coro = self.register_vdur_nfvi_handler(vnfr, vdur)
+                self.loop.create_task(coro)
+
+    def on_vnfr_delete(self, vnfr):
+        self.monitor.remove_vnfr(vnfr.id)
+
+        # Delete any NFVI handlers associated with the VNFR
+        for vdur in vnfr.vdur:
+            self.deregister_vdur_nfvi_handler(vdur.id)
+
+    def on_retrieve_nfvi_metrics(self, vdur_id):
+        return self.monitor.retrieve_nfvi_metrics(vdur_id)
+
+    @asyncio.coroutine
+    def register_vdur_nfvi_handler(self, vnfr, vdur):
+        if vdur.vim_id is None:
+            return
+
+        if vdur.operational_status != "running":
+            return
+
+        if vdur.id not in self.vdur_handlers:
+            publisher = VdurNfviMetricsPublisher(self, vnfr, vdur)
+            yield from publisher.register()
+            self.vdur_handlers[vdur.id] = publisher
+
+    def deregister_vdur_nfvi_handler(self, vdur_id):
+        if vdur_id in self.vdur_handlers:
+            handler = self.vdur_handlers[vdur_id]
+
+            del self.vdur_handlers[vdur_id]
+            handler.deregister()
+
+    @asyncio.coroutine
+    def on_create_alarm(self, account, vdur_id, alarm):
+        """Creates an alarm and returns an alarm ID
+
+        Arguments:
+            account - a name of the cloud account used to authenticate the
+                      creation of an alarm
+            vdur_id - the identifier of VDUR to create the alarm for
+            alarm   - a structure defining the alarm that should be created
+
+        Returns:
+            An identifier specific to the created alarm
+
+        """
+        return (yield from self.monitor.create_alarm(account, vdur_id, alarm))
+
+    @asyncio.coroutine
+    def on_destroy_alarm(self, account, alarm_id):
+        """Destroys an alarm with the specified identifier
+
+        Arguments:
+            account  - the name of the cloud account used to authenticate the
+                       destruction of the alarm
+            alarm_id - the identifier of the alarm to destroy
+
+        """
+        yield from self.monitor.destroy_alarm(account, alarm_id)
diff --git a/rwlaunchpad/plugins/rwmonitor/rwmonitor.py b/rwlaunchpad/plugins/rwmonitor/rwmonitor.py
new file mode 100755
index 0000000..497e917
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonitor/rwmonitor.py
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwmonitor
+
+class Tasklet(rift.tasklets.rwmonitor.MonitorTasklet):
+    # Thin alias so the plugin loader finds a class named "Tasklet" here
+    # while the real implementation lives in site-packages (RIFT-6485).
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwmonparam/CMakeLists.txt b/rwlaunchpad/plugins/rwmonparam/CMakeLists.txt
new file mode 100644
index 0000000..ad63593
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonparam/CMakeLists.txt
@@ -0,0 +1,41 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 2016/07/01
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwmonparam)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+# NOTE(review): PYTHON3_ONLY presumably restricts byte-compilation to
+# python3 — confirm against the rift_python_install_tree definition.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/aggregator.py
+    rift/tasklets/${TASKLET_NAME}/nsr_core.py
+    rift/tasklets/${TASKLET_NAME}/vnfr_core.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/__init__.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/__init__.py
new file mode 100644
index 0000000..b775943
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/__init__.py
@@ -0,0 +1 @@
+from .rwmonparam import MonitoringParameterTasklet
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/aggregator.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/aggregator.py
new file mode 100644
index 0000000..47b1d15
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/aggregator.py
@@ -0,0 +1,160 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file aggregator.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+import abc
+import functools
+
+
+class IncompatibleAggregationType(Exception):
+    # Aggregation requested cannot be applied to the given value type.
+    pass
+
+class InvalidAggregationType(Exception):
+    # Unknown aggregation or field type was requested.
+    pass
+
+class InvalidAggregationOperation(Exception):
+    # Operation (SUM, AVERAGE, ...) unsupported for the value type.
+    pass
+
+class InvalidAggregationValues(Exception):
+    # Values of mixed field types were supplied for a single aggregation.
+    pass
+
+
+def make_aggregator(field_types):
+    """A factory method to create the aggregator based on the field type
+    [value_interger, value_string or value_decimal] 
+    
+    Args:
+        field_types (list): list of field types to aggregate
+        values (list): List of values
+        aggregation_type (str): Type of aggregation.
+    
+    Returns:
+        subclass of ValueAggregator
+    
+    Raises:
+        InvalidAggregationType: If Unknown aggregation type is provided
+        InvalidAggregationValues: Raised if a mix of field types are provided.
+    """
+    if len(set(field_types)) != 1:
+        raise InvalidAggregationValues(
+            "Multiple value types provided for aggrgation {}".format(field_types))
+
+    field_type = field_types[0]
+
+    if field_type == IntValueAggregator.field_name():
+        return IntValueAggregator()
+    elif field_type == DecimalValueAggregator.field_name():
+        return DecimalValueAggregator()
+    elif field_type == StringValueAggregator.field_name():
+        return StringValueAggregator()
+
+    raise InvalidAggregationType("Invalid aggregation type")
+
+
+class ValueAggregator():
+    """Base class that defines all the basic operations.
+    
+    Attributes:
+        aggregation_type (str): Aggregation type to be used to select the
+                appropriate method.
+        values (list): List of values to aggregate.
+    """
+    @classmethod
+    @abc.abstractmethod
+    def field_name(self):
+        pass
+
+    def average(self, values):
+        raise InvalidAggregationOperation(
+                "Invalid operation AVERAGE for {}".format(self.values))
+
+    def sum(self, values):
+        raise InvalidAggregationOperation(
+                "Invalid operation SUM for {}".format(self.values))
+
+    def maximum(self, values):
+        raise InvalidAggregationOperation(
+                "Invalid operation MAXIMUM for {}".format(self.values))
+
+    def minimum(self, values):
+        raise InvalidAggregationOperation(
+                "Invalid operation MINIMUM for {}".format(self.values))
+
+    def count(self, values):
+        raise InvalidAggregationOperation(
+                "Invalid operation COUNT for {}".format(self.values))
+
+    def aggregate(self, aggregation_type, values):
+        OP_MAP = {
+                "AVERAGE": self.average,
+                "SUM": self.sum,
+                "MAXIMUM": self.maximum,
+                "MINIMUM": self.minimum,
+                "COUNT": self.count
+            }
+
+        op_func = OP_MAP.get(aggregation_type, None)
+
+        if op_func is None:
+            raise InvalidAggregationType("Unknown Aggregation type provided.")
+
+        return self.field_name(), op_func(values)
+
+
+class StringValueAggregator(ValueAggregator):
+    # Aggregator for string values; inherits the base behavior, so every
+    # operation raises InvalidAggregationOperation.
+
+    @classmethod
+    def field_name(self):
+        return "value_string"
+
+
+class DecimalValueAggregator(ValueAggregator):
+
+    @classmethod
+    def field_name(self):
+        return "value_decimal"
+
+    def average(self, values):
+        avg = functools.reduce(lambda x, y: x + y, values) / len(values)
+        return avg
+
+    def sum(self, values):
+        return functools.reduce(lambda x, y: x + y, values)
+
+    def maximum(self, values):
+        return max(values)
+
+    def minimum(self, values):
+        return min(values)
+
+    def count(self, values):
+        return len(values)
+
+
+class IntValueAggregator(DecimalValueAggregator):
+
+    @classmethod
+    def field_name(self):
+        return "value_integer"
+
+    def average(self, values):
+        avg = functools.reduce(lambda x, y: x + y, values) / len(values)
+        return int(avg)
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/nsr_core.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/nsr_core.py
new file mode 100644
index 0000000..b1b9cd0
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/nsr_core.py
@@ -0,0 +1,417 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file nsr_core.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+
+import asyncio
+import functools
+import uuid
+
+from gi.repository import (RwDts as rwdts, NsrYang)
+import rift.mano.dts as mano_dts
+
+from . import aggregator as aggregator
+
+
+class MissingValueField(Exception):
+    pass
+
+
+class VnfrMonitoringParamSubscriber(mano_dts.AbstractOpdataSubscriber):
+    """Registers for VNFR monitoring parameter changes.
+
+    Attributes:
+        monp_id (str): Monitoring Param ID
+        vnfr_id (str): VNFR ID
+    """
+    def __init__(self, log, dts, loop, vnfr_id, monp_id, callback=None):
+        super().__init__(log, dts, loop, callback)
+        self.vnfr_id = vnfr_id
+        self.monp_id = monp_id
+
+    def get_xpath(self):
+        # Keyed opdata xpath selecting exactly one monitoring-param of one
+        # VNFR in the catalog.
+        return("D,/vnfr:vnfr-catalog" +
+               "/vnfr:vnfr[vnfr:id='{}']".format(self.vnfr_id) +
+               "/vnfr:monitoring-param" +
+               "[vnfr:id='{}']".format(self.monp_id))
+
+
+class NsrMonitoringParam():
+    """Class that handles NS Mon-param data.
+    """
+    MonParamMsg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
+    # Sentinel stored for a (vnfr, monp) key until its value arrives.
+    MISSING = None
+    DEFAULT_AGGREGATION_TYPE = "AVERAGE"
+
+    @classmethod
+    def create_nsr_mon_params(cls, nsd, constituent_vnfrs, store):
+        """Convenience class that constructs NSMonitoringParam objects
+
+        Args:
+            nsd (RwNsdYang.YangData_Nsd_NsdCatalog_Nsd): Nsd object
+            constituent_vnfrs (list): List of constituent vnfr objects of NSR
+            store (SubscriberStore): Store object instance
+
+        Returns:
+            list NsrMonitoringParam object.
+
+        Also handles legacy NSD descriptor which has no mon-param defines. In
+        such cases the mon-params are created from VNFD's mon-param config.
+        """
+        # NOTE(review): this local is unused in this method.
+        MonParamMsg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
+
+        mon_params = []
+        for mon_param_msg in nsd.monitoring_param:
+            mon_params.append(NsrMonitoringParam(
+                    mon_param_msg,
+                    constituent_vnfrs
+                    ))
+
+        # Legacy Handling.
+        # This indicates that the NSD had no mon-param config.
+        if not nsd.monitoring_param:
+            for vnfr in constituent_vnfrs:
+                vnfd = store.get_vnfd(vnfr.vnfd_ref)
+                for monp in vnfd.monitoring_param:
+                    mon_params.append(NsrMonitoringParam(
+                        monp,
+                        [vnfr],
+                        is_legacy=True))
+
+        return mon_params
+
+    def __init__(self, monp_config, constituent_vnfrs, is_legacy=False):
+        """
+        Args:
+            monp_config (GiObject): Config data to create the NSR mon-param msg
+            constituent_vnfrs (list): List of VNFRs that may contain the mon-param
+            is_legacy (bool, optional): If set then the mon-param are created from
+                vnfd's config and not NSD's config.
+        """
+        self._constituent_vnfr_map = {vnfr.id:vnfr for vnfr in constituent_vnfrs}
+
+        # An internal store to hold the data
+        # Key => (vnfr_id, monp_id)
+        # value => (value_type, value)
+        self.vnfr_monparams = {}
+
+        if not is_legacy:
+            self._msg = self._convert_nsd_msg(monp_config)
+        else:
+            self._msg = self._convert_vnfd_msg(monp_config)
+
+    @property
+    def nsr_mon_param_msg(self):
+        """Gi object msg"""
+        return self._msg
+
+    @property
+    def vnfr_ids(self):
+        """Store Keys: list of (vnfr_id, monp_id) tuples."""
+        return list(self.vnfr_monparams.keys())
+
+    @property
+    def vnfr_values(self):
+        """Store values: list of (value_type, value) tuples or MISSING."""
+        return list(self.vnfr_monparams.values())
+
+    @property
+    def is_ready(self):
+        """Flag which indicates if all of the constituent vnfr values are
+        available to perform the aggregation"""
+        return (self.MISSING not in self.vnfr_values)
+
+    @property
+    def aggregation_type(self):
+        """Aggregation type"""
+        return self.nsr_mon_param_msg.aggregation_type
+
+    @property
+    def is_legacy(self):
+        # Legacy (VNFD-derived) params carry no aggregation type.
+        return (self.aggregation_type is None)
+
+    @classmethod
+    def extract_value(cls, monp):
+        """Class method to extract the value type and value from the
+        mon-param gi message
+
+        Args:
+            monp (GiObject): Mon param msg
+
+        Returns:
+            Tuple: (value type, value), or None if no value field is set.
+        """
+        if monp.has_field("value_integer"):
+            return ("value_integer", monp.value_integer)
+        elif monp.has_field("value_decimal"):
+            return ("value_decimal", monp.value_decimal)
+        elif monp.has_field("value_string"):
+            return ("value_string", monp.value_string)
+
+        return None
+
+    def _constituent_vnfrs(self, constituent_vnfr_ids):
+        # Fetch the VNFRs
+        # NOTE(review): self._store is never assigned in this class; this
+        # helper looks dead/unreachable — confirm before relying on it.
+        vnfr_map = {}
+        for constituent_vnfr in constituent_vnfr_ids:
+            vnfr_id = constituent_vnfr.vnfr_id
+            vnfr_map[vnfr_id] = self._store.get_vnfr(vnfr_id)
+
+        return vnfr_map
+
+    def _extract_ui_elements(self, monp):
+        # Copy the display-oriented fields from the descriptor message.
+        ui_fields = ["group_tag", "description", "widget_type", "units", "value_type"]
+        ui_data = [getattr(monp, ui_field) for ui_field in ui_fields]
+
+        return dict(zip(ui_fields, ui_data))
+
+
+    def _convert_nsd_msg(self, nsd_monp):
+        """Create initial msg without values"""
+        vnfd_to_vnfr = {vnfr.vnfd_ref: vnfr_id
+                for vnfr_id, vnfr in self._constituent_vnfr_map.items()}
+
+        # First, convert the monp param ref from vnfd to vnfr terms.
+        vnfr_mon_param_ref = []
+        for vnfd_mon in nsd_monp.vnfd_monitoring_param:
+            vnfr_id = vnfd_to_vnfr[vnfd_mon.vnfd_id_ref]
+            monp_id = vnfd_mon.vnfd_monitoring_param_ref
+
+            self.vnfr_monparams[(vnfr_id, monp_id)] = self.MISSING
+
+            vnfr_mon_param_ref.append({
+                'vnfr_id_ref': vnfr_id,
+                'vnfr_mon_param_ref': monp_id
+                })
+
+        monp_fields = {
+                # For now both the NSD and NSR's monp ID are same.
+                'id': nsd_monp.id,
+                'name': nsd_monp.name,
+                'nsd_mon_param_ref': nsd_monp.id,
+                'vnfr_mon_param_ref': vnfr_mon_param_ref,
+                'aggregation_type': nsd_monp.aggregation_type
+            }
+
+        ui_fields = self._extract_ui_elements(nsd_monp)
+        monp_fields.update(ui_fields)
+        monp = self.MonParamMsg.from_dict(monp_fields)
+
+        return monp
+
+    def _convert_vnfd_msg(self, vnfd_monp):
+        # Legacy path: exactly one constituent VNFR supplies the param.
+        vnfr = list(self._constituent_vnfr_map.values())[0]
+        self.vnfr_monparams[(vnfr.id, vnfd_monp.id)] = self.MISSING
+
+        monp_data = {
+                # Legacy params get a fresh id; there is no NSD id to reuse.
+                'id': str(uuid.uuid1()),
+                'name': vnfd_monp.name,
+                'vnfr_mon_param_ref': [{
+                    'vnfr_id_ref': vnfr.id,
+                    'vnfr_mon_param_ref': vnfd_monp.id
+                    }]
+                }
+
+        ui_fields = self._extract_ui_elements(vnfd_monp)
+        monp_data.update(ui_fields)
+        monp = self.MonParamMsg.from_dict(monp_data)
+
+        return monp
+
+    def update_vnfr_value(self, key, value):
+        """Update the internal store
+
+        Args:
+            key (Tuple): (vnfr_id, monp_id)
+            value (Tuple): (value_type, value)
+        """
+        self.vnfr_monparams[key] = value
+
+    def update_ns_value(self, value_field, value):
+        """Updates the NS mon-param data with the aggregated value.
+
+        Args:
+            value_field (str): Value field in NSR
+            value : Aggregated value
+        """
+        setattr(self.nsr_mon_param_msg, value_field, value)
+
+
+class NsrMonitoringParamPoller(mano_dts.DtsHandler):
+    """Handler responsible for publishing NS level monitoring
+    parameters.
+
+    Design:
+        1. Created subscribers for each vnfr's monitoring parameter
+        2. Accumulates the VNFR's value into the NsrMonitoringParam's internal
+            store.
+        3. Once all values are available, aggregate the value and triggers
+            callback notification to the subscribers.
+    """
+    @classmethod
+    def from_handler(cls, handler, monp, callback):
+        """Convenience class to build NsrMonitoringParamPoller object.
+        """
+        return cls(handler.log, handler.dts, handler.loop, monp, callback)
+
+    def __init__(self, log, dts, loop, monp, callback=None):
+        """
+        Args:
+            monp (NsrMonitoringParam): Param object
+            callback (None, optional): Callback to be triggered after value has
+                been aggregated.
+        """
+        super().__init__(log, dts, loop)
+
+        self.monp = monp
+        self.subscribers = []
+        self.callback = callback
+        self._agg = None
+
+    def make_aggregator(self, field_types):
+        if not self._agg:
+            self._agg = aggregator.make_aggregator(field_types)
+        return self._agg
+
+
+    def update_value(self, monp, action, vnfr_id):
+        """Callback that gets triggered when VNFR's mon param changes.
+
+        Args:
+            monp (Gi Object): Gi object msg
+            action (rwdts.QueryAction)): Action type
+            vnfr_id (str): Vnfr ID
+        """
+        key = (vnfr_id, monp.id)
+        value = NsrMonitoringParam.extract_value(monp)
+
+        if not value:
+            return
+
+        # Accumulate the value
+        self.monp.update_vnfr_value(key, value)
+
+        # If all values are not available, then don't start
+        # the aggregation process.
+        if not self.monp.is_ready:
+            return
+
+        if self.monp.is_legacy:
+            # If no monp are specified then copy over the vnfr's monp data
+            value_field, value = value
+        else:
+            field_types, values = zip(*self.monp.vnfr_values)
+
+            value_field, value = self.make_aggregator(field_types).aggregate(
+                    self.monp.aggregation_type,
+                    values)
+
+        self.monp.update_ns_value(value_field, value)
+        if self.callback:
+            self.callback(self.monp.nsr_mon_param_msg)
+
+    @asyncio.coroutine
+    def register(self):
+        for vnfr_id, monp_id in self.monp.vnfr_ids:
+            callback = functools.partial(self.update_value, vnfr_id=vnfr_id)
+            self.subscribers.append(VnfrMonitoringParamSubscriber(
+                self.loop, self.dts, self.loop, vnfr_id, monp_id, callback=callback))
+
+    @asyncio.coroutine
+    def start(self):
+        for sub in self.subscribers:
+            yield from sub.register()
+
+    def stop(self):
+        for sub in self.subscribers:
+            sub.deregister()
+
+
+class NsrMonitorDtsHandler(mano_dts.DtsHandler):
+    """ NSR monitoring class """
+
+    def __init__(self, log, dts, loop, nsr, constituent_vnfrs, store):
+        """
+        Args:
+            nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): NSR object
+            constituent_vnfrs (list): list of VNFRs in NSR
+            store (SubscriberStore): Store instance
+        """
+        super().__init__(log, dts, loop)
+
+        self.nsr = nsr
+        self.store = store
+        self.constituent_vnfrs = constituent_vnfrs
+        self.mon_params_pollers = []
+
+    def xpath(self, param_id=None):
+        return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+            "[nsr:ns-instance-config-ref='{}']".format(self.nsr.ns_instance_config_ref) +
+            "/nsr:monitoring-param" +
+            ("[nsr:id='{}']".format(param_id) if param_id else ""))
+
+    @asyncio.coroutine
+    def register(self):
+        self.reg = yield from self.dts.register(xpath=self.xpath(),
+                  flags=rwdts.Flag.PUBLISHER|rwdts.Flag.CACHE|rwdts.Flag.NO_PREP_READ)
+
+        assert self.reg is not None
+
+    def callback(self, nsr_mon_param_msg):
+        """Callback that triggers update.
+        """
+        self.reg.update_element(
+                self.xpath(param_id=nsr_mon_param_msg.id),
+                nsr_mon_param_msg)
+
+    @asyncio.coroutine
+    def start(self):
+        nsd = self.store.get_nsd(self.nsr.nsd_ref)
+        mon_params = NsrMonitoringParam.create_nsr_mon_params(
+                nsd,
+                self.constituent_vnfrs,
+                self.store)
+
+        for monp in mon_params:
+            poller = NsrMonitoringParamPoller.from_handler(
+                    self,
+                    monp,
+                    callback=self.callback)
+
+            self.mon_params_pollers.append(poller)
+            yield from poller.register()
+            yield from poller.start()
+
+    def stop(self):
+        self.deregister()
+        for poller in self.mon_params_pollers:
+            poller.stop()
+
+
+    def deregister(self):
+        """ de-register with dts """
+        if self.reg is not None:
+            self.reg.deregister()
+            self.reg = None
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/rwmonparam.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/rwmonparam.py
new file mode 100644
index 0000000..d0f31e3
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/rwmonparam.py
@@ -0,0 +1,216 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file rwmonparam.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 01-Jul-2016
+
+"""
+
+import asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+
+from gi.repository import (
+        RwDts as rwdts,
+        RwLaunchpadYang,
+        ProtobufC)
+import rift.mano.cloud
+import rift.mano.dts as subscriber
+import rift.tasklets
+
+from . import vnfr_core
+from . import nsr_core
+
+
+class MonitoringParameterTasklet(rift.tasklets.Tasklet):
+    """The main task of this Tasklet is to listen for VNFR changes and once the
+    VNFR hits the running state, triggers the monitor.
+
+    It also watches NSR opdata and publishes NSR-level monitoring params
+    once an NSR reaches the "configured" state.
+    """
+    def __init__(self, *args, **kwargs):
+        try:
+            super().__init__(*args, **kwargs)
+            self.rwlog.set_category("rw-monitor-log")
+        except Exception as e:
+            self.log.exception(e)
+
+        self.vnfr_subscriber = None
+        self.store = None
+
+        # Maps: vnfr id -> VnfMonitorDtsHandler,
+        #       ns-instance-config-ref -> NsrMonitorDtsHandler
+        self.vnfr_monitors = {}
+        self.nsr_monitors = {}
+
+        # Needs to be moved to store once the DTS bug is resolved
+        self.vnfrs = {}
+
+    def start(self):
+        """Tasklet entry point: create the DTS handle, the VNFR/NSR
+        catalog subscribers and the shared subscriber store.
+        """
+        super().start()
+
+        self.log.info("Starting MonitoringParameterTasklet")
+        self.log.debug("Registering with dts")
+
+        self.dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                RwLaunchpadYang.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        self.vnfr_subscriber = subscriber.VnfrCatalogSubscriber.from_tasklet(
+                self,
+                callback=self.handle_vnfr)
+        # NOTE(review): attribute name 'nsr_subsriber' is misspelled
+        # ('subscriber'); kept as-is because init() reads the same name.
+        self.nsr_subsriber = subscriber.NsrCatalogSubscriber.from_tasklet(
+                self,
+                callback=self.handle_nsr)
+
+        self.store = subscriber.SubscriberStore.from_tasklet(self)
+
+        self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+    def stop(self):
+      # NOTE(review): 6-space indent is inconsistent with the rest of the
+      # file, and super().stop() is not called — confirm that is intended.
+      try:
+          self.dts.deinit()
+      except Exception as e:
+          self.log.exception(e)
+
+    @asyncio.coroutine
+    def init(self):
+        """Register the store and both catalog subscribers with DTS."""
+        self.log.debug("creating vnfr subscriber")
+        yield from self.store.register()
+        yield from self.vnfr_subscriber.register()
+        yield from self.nsr_subsriber.register()
+
+    @asyncio.coroutine
+    def run(self):
+        # Nothing to do at RUN; monitors are driven by subscriber callbacks.
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Handle DTS state change
+
+        Take action according to current DTS state to transition application
+        into the corresponding application state
+
+        Arguments
+            state - current dts state
+
+        """
+        # DTS state -> next DTS state to request after handling.
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        # DTS state -> coroutine to run on entering that state.
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self.dts.handle.set_state(next_state)
+
+    def handle_vnfr(self, vnfr, action):
+        """Starts a monitoring parameter job for every VNFR that reaches
+        the "configured" state; stops and removes it on DELETE.
+
+        Args:
+            vnfr (GiOBject): VNFR Gi object message from DTS
+            action (rwdts.QueryAction): CREATE/UPDATE start monitoring,
+                DELETE tears it down.
+        """
+
+        def vnfr_create():
+            # if vnfr.operational_status == "running" and vnfr.id not in self.vnfr_monitors:
+            if vnfr.config_status == "configured" and vnfr.id not in self.vnfr_monitors:
+
+                vnf_mon = vnfr_core.VnfMonitorDtsHandler.from_vnf_data(
+                        self,
+                        vnfr,
+                        self.store.get_vnfd(vnfr.vnfd_ref))
+
+                self.vnfr_monitors[vnfr.id] = vnf_mon
+                self.vnfrs[vnfr.id] = vnfr
+
+                # register() is a coroutine, so registration + start are
+                # deferred onto the event loop.
+                @asyncio.coroutine
+                def task():
+                    yield from vnf_mon.register()
+                    vnf_mon.start()
+
+                self.loop.create_task(task())
+
+
+        def vnfr_delete():
+            if vnfr.id in self.vnfr_monitors:
+                self.log.debug("VNFR %s deleted: Stopping vnfr monitoring", vnfr.id)
+                vnf_mon = self.vnfr_monitors.pop(vnfr.id)
+                vnf_mon.stop()
+                self.vnfrs.pop(vnfr.id)
+
+        if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+            vnfr_create()
+        elif action == rwdts.QueryAction.DELETE:
+            vnfr_delete()
+
+
+    def handle_nsr(self, nsr, action):
+        """Callback for NSR opdata changes. Creates a publisher for every
+        NS that moves to config state.
+
+        Args:
+            nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): Ns Opdata
+            action (rwdts.QueryAction): Action type of the change.
+        """
+        def nsr_create():
+            # if nsr.operational_status == "running" and nsr.ns_instance_config_ref not in self.nsr_monitors:
+            if nsr.config_status == "configured" and nsr.ns_instance_config_ref not in self.nsr_monitors:
+                # NOTE(review): passes ALL known VNFRs, not only this NSR's
+                # constituents — confirm filtering happens downstream.
+                nsr_mon = nsr_core.NsrMonitorDtsHandler(
+                        self.log,
+                        self.dts,
+                        self.loop,
+                        nsr,
+                        list(self.vnfrs.values()),
+                        self.store
+                        )
+
+                self.nsr_monitors[nsr.ns_instance_config_ref] = nsr_mon
+
+                @asyncio.coroutine
+                def task():
+                    yield from nsr_mon.register()
+                    yield from nsr_mon.start()
+
+                self.loop.create_task(task())
+
+
+
+        def nsr_delete():
+            if nsr.ns_instance_config_ref in self.nsr_monitors:
+            # if vnfr.operational_status == "running" and vnfr.id in self.vnfr_monitors:
+                nsr_mon = self.nsr_monitors.pop(nsr.ns_instance_config_ref)
+                nsr_mon.stop()
+
+        if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+            nsr_create()
+        elif action == rwdts.QueryAction.DELETE:
+            nsr_delete()
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/vnfr_core.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/vnfr_core.py
new file mode 100644
index 0000000..e798376
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/vnfr_core.py
@@ -0,0 +1,700 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import logging
+import collections
+import concurrent
+import types
+
+import requests
+import requests.auth
+import tornado.escape
+
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+
+import gi
+gi.require_version('RwDts', '1.0')
+import rift.tasklets
+from gi.repository import (
+    RwDts as rwdts,
+    VnfrYang
+    )
+import rift.mano.dts as mano_dts
+import rwlogger
+
+
+class MonitoringParamError(Exception):
+    """Monitoring Parameter error: raised by the queriers, converters and
+    HTTP pollers in this module when polling or value extraction fails."""
+    pass
+
+
+class JsonPathValueQuerier(object):
+    """Extracts a single value from a JSON document using a jsonpath
+    expression (jsonpath_rw)."""
+
+    def __init__(self, log, json_path):
+        self._log = log
+        self._json_path = json_path
+        self._json_path_expr = None
+
+        # Parse eagerly; on failure the expr stays None and query()
+        # raises MonitoringParamError rather than crashing here.
+        try:
+            import jsonpath_rw
+            self._json_path_expr = jsonpath_rw.parse(self._json_path)
+        except Exception as e:
+            self._log.error("Could not create json_path parser: %s", str(e))
+
+    def query(self, json_msg):
+        """Decode json_msg and return the first value matching the path.
+
+        Raises:
+            MonitoringParamError: on decode failure, missing parser,
+                find() failure, or when no values match.
+        """
+        try:
+            json_dict = tornado.escape.json_decode(json_msg)
+        except ValueError as e:
+            msg = "Failed to convert response into json"
+            self._log.warning(msg)
+            raise MonitoringParamError(e)
+
+        if self._json_path_expr is None:
+            raise MonitoringParamError(
+                    "Parser not created.  Unable to extract value from %s" % json_msg
+                    )
+
+        try:
+            matches = self._json_path_expr.find(json_dict)
+            values = [m.value for m in matches]
+        except Exception as e:
+            raise MonitoringParamError(
+                    "Failed to run find using json_path (%s) against json_msg: %s" %
+                    (self._json_path, str(e))
+                    )
+
+        if len(values) == 0:
+            raise MonitoringParamError(
+                    "No values found from json_path (%s)" % self._json_path
+                    )
+
+        if len(values) > 1:
+            self._log.debug("Got multiple values from json_path (%s).  Only returning the first.",
+                            self._json_path)
+
+        return values[0]
+
+
+class ObjectPathValueQuerier(object):
+    """Extracts a single value from a JSON document using an objectpath
+    expression (objectpath library, imported lazily in query())."""
+
+    def __init__(self, log, object_path):
+        self._log = log
+        self._object_path = object_path
+        # NOTE(review): _object_path_expr is set here but never used.
+        self._object_path_expr = None
+
+    def query(self, object_msg):
+        """Decode object_msg and return the (first) value the objectpath
+        expression yields.
+
+        Raises:
+            MonitoringParamError: on decode/tree/execute failure, on an
+                exhausted generator, or when a list result is empty.
+        """
+        try:
+            object_dict = tornado.escape.json_decode(object_msg)
+        except ValueError as e:
+            msg = "Failed to convert response into object"
+            self._log.warning(msg)
+            raise MonitoringParamError(e)
+
+        import objectpath
+        try:
+            tree = objectpath.Tree(object_dict)
+        except Exception as e:
+            # NOTE(review): the trailing ", str(e)" makes msg a TUPLE, not
+            # a %-formatted string — the error text is never interpolated.
+            msg = "Could not create objectpath tree: %s", str(e)
+            self._log.error(msg)
+            raise MonitoringParamError(msg)
+
+        try:
+            value = tree.execute(self._object_path)
+        except Exception as e:
+            raise MonitoringParamError(
+                    "Failed to run execute object_path (%s) against object_msg: %s" %
+                    (self._object_path, str(e))
+                    )
+
+        # objectpath may return a lazy generator; take its first item.
+        if isinstance(value, types.GeneratorType):
+            try:
+                value = next(value)
+            except Exception as e:
+                raise MonitoringParamError(
+                        "Failed to get value from objectpath %s execute generator: %s" %
+                        (self._object_path, str(e))
+                        )
+
+        if isinstance(value, (list, tuple)):
+            if len(value) == 0:
+                raise MonitoringParamError(
+                        "No values found from object_path (%s)" % self._object_path
+                        )
+
+            elif len(value) > 1:
+                self._log.debug(
+                        "Got multiple values from object_path (%s).  "
+                        "Only returning the first.", self._object_path
+                        )
+
+            # Only take the first element
+            value = value[0]
+
+        return value
+
+
+class JsonKeyValueQuerier(object):
+    """Extracts the value of a single top-level key from a JSON document."""
+
+    def __init__(self, log, key):
+        self._log = log
+        self._key = key
+
+    def query(self, json_msg):
+        """Decode json_msg and return json_msg[self._key].
+
+        Raises:
+            MonitoringParamError: when json_msg is not valid JSON or the
+                key is absent from the decoded dict.
+        """
+        try:
+            json_dict = tornado.escape.json_decode(json_msg)
+        except ValueError as e:
+            msg = "Failed to convert response into json"
+            self._log.warning(msg)
+            raise MonitoringParamError(e)
+
+        if self._key not in json_dict:
+            msg = "Did not find '{}' key in response: {}".format(
+                    self._key, json_dict
+                    )
+            self._log.warning(msg)
+            raise MonitoringParamError(msg)
+
+        value = json_dict[self._key]
+
+        return value
+
+
+class ValueConverter(object):
+    """Coerces an extracted value to the declared monitoring-param
+    value_type ("INT", "DECIMAL" or "STRING")."""
+
+    def __init__(self, value_type):
+        self._value_type = value_type
+
+    def _convert_int(self, value):
+        # Coerce to int; already-int values pass through untouched.
+        if isinstance(value, int):
+            return value
+
+        try:
+            return int(value)
+        except (ValueError, TypeError) as e:
+            # NOTE(review): args are passed %-style to the exception and
+            # are never interpolated into one string (same pattern below).
+            raise MonitoringParamError(
+                    "Could not convert value into integer: %s", str(e)
+                    )
+
+    def _convert_text(self, value):
+        # Coerce to str; already-str values pass through untouched.
+        if isinstance(value, str):
+            return value
+
+        try:
+            return str(value)
+        except (ValueError, TypeError) as e:
+            raise MonitoringParamError(
+                    "Could not convert value into string: %s", str(e)
+                    )
+
+    def _convert_decimal(self, value):
+        # Coerce to float; already-float values pass through untouched.
+        if isinstance(value, float):
+            return value
+
+        try:
+            return float(value)
+        except (ValueError, TypeError) as e:
+            # NOTE(review): message says "string" — copy-paste from
+            # _convert_text; should read "decimal"/"float".
+            raise MonitoringParamError(
+                    "Could not convert value into string: %s", str(e)
+                    )
+
+    def convert(self, value):
+        """Dispatch on the configured value_type; raise on unknown types."""
+        if self._value_type == "INT":
+            return self._convert_int(value)
+        elif self._value_type == "DECIMAL":
+            return self._convert_decimal(value)
+        elif self._value_type == "STRING":
+            return self._convert_text(value)
+        else:
+            raise MonitoringParamError("Unknown value type: %s", self._value_type)
+
+
+class HTTPBasicAuth(object):
+    """Plain username/password holder.
+
+    NOTE(review): appears unused in this module — HTTPEndpoint.auth builds
+    requests.auth.HTTPBasicAuth directly. Confirm before removing.
+    """
+    def __init__(self, username, password):
+        self.username = username
+        self.password = password
+
+
+class HTTPEndpoint(object):
+    """Wraps one VNFD http-endpoint descriptor plus the VNF management IP,
+    and polls it over HTTP(S) via a shared requests.Session."""
+
+    def __init__(self, log, loop, ip_address, ep_msg):
+        self._log = log
+        self._loop = loop
+        self._ip_address = ip_address
+        self._ep_msg = ep_msg
+
+        # This is to suppress HTTPS related warning as we do not support
+        # certificate verification yet
+        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+        self._session = requests.Session()
+        # auth/headers are built lazily from the endpoint message.
+        self._auth = None
+        self._headers = None
+
+    @property
+    def poll_interval(self):
+        # Seconds between polls; consumed by EndpointMonParamsPoller.
+        return self._ep_msg.polling_interval_secs
+
+    @property
+    def ip_address(self):
+        return self._ip_address
+
+    @property
+    def port(self):
+        return self._ep_msg.port
+
+    @property
+    def protocol(self):
+        # "https" only when the descriptor explicitly sets https=true.
+        if self._ep_msg.has_field("https"):
+           if self._ep_msg.https is True:
+               return "https"
+
+        return "http"
+
+    @property
+    def path(self):
+        return self._ep_msg.path
+
+    @property
+    def method(self):
+        # HTTP method from the descriptor; defaults to GET.
+        if self._ep_msg.has_field("method"):
+           return self._ep_msg.method
+        return "GET"
+
+    @property
+    def username(self):
+        if self._ep_msg.has_field("username"):
+            return self._ep_msg.username
+
+        return None
+
+    @property
+    def headers(self):
+        # Lazily collect key/value header pairs from the descriptor.
+        if self._headers is None:
+            headers = {}
+            for header in self._ep_msg.headers:
+                if header.has_field("key") and header.has_field("value"):
+                    headers[header.key] = header.value
+
+            self._headers = headers
+
+        return self._headers
+
+    @property
+    def password(self):
+        if self._ep_msg.has_field("password"):
+            return self._ep_msg.password
+
+        return None
+
+    @property
+    def auth(self):
+        # Lazily build basic auth; stays None unless BOTH username and
+        # password are present.
+        if self._auth is None:
+            if self.username is not None and self.password is not None:
+                self._auth = requests.auth.HTTPBasicAuth(
+                        self.username,
+                        self.password,
+                        )
+
+        return self._auth
+
+    @property
+    def url(self):
+        url = "{protocol}://{ip_address}:{port}/{path}".format(
+                protocol=self.protocol,
+                ip_address=self.ip_address,
+                port=self.port,
+                path=self.path.lstrip("/"),
+                )
+
+        return url
+
+    def _poll(self):
+        """Blocking HTTP request; returns the response body as text.
+
+        Raises:
+            MonitoringParamError: wrapping any requests error, including
+                non-2xx statuses via raise_for_status().
+        """
+        try:
+            # verify=False: certificate verification unsupported (see __init__).
+            resp = self._session.request(
+                    self.method, self.url, timeout=10, auth=self.auth,
+                    headers=self.headers, verify=False
+                    )
+            resp.raise_for_status()
+        except requests.exceptions.RequestException as e:
+            msg = "Got HTTP error when request monitoring method {} from url {}: {}".format(
+                    self.method,
+                    self.url,
+                    str(e),
+                    )
+            self._log.warning(msg)
+            raise MonitoringParamError(msg)
+
+        return resp.text
+
+    @asyncio.coroutine
+    def poll(self):
+        """Run the blocking _poll() in a one-shot thread executor so the
+        event loop is not blocked; returns the response text."""
+        try:
+            with concurrent.futures.ThreadPoolExecutor(1) as executor:
+                resp = yield from self._loop.run_in_executor(
+                        executor,
+                        self._poll,
+                        )
+
+        except MonitoringParamError as e:
+            msg = "Caught exception when polling http endpoint: %s" % str(e)
+            self._log.warning(msg)
+            raise MonitoringParamError(msg)
+
+        self._log.debug("Got response from http endpoint (%s): %s",
+                        self.url, resp)
+
+        return resp
+
+
+class MonitoringParam(object):
+    """One VNFR monitoring param: holds its protobuf message, a JSON value
+    querier chosen by json_query_method, and the latest polled value."""
+
+    def __init__(self, log, vnfr_mon_param_msg):
+        self._log = log
+        self._vnfr_mon_param_msg = vnfr_mon_param_msg
+
+        # Latest converted value; None until the first successful poll.
+        self._current_value = None
+
+        self._json_querier = self._create_json_querier()
+        self._value_converter = ValueConverter(self.value_type)
+
+    def _create_json_querier(self):
+        """Pick the querier for NAMEKEY / JSONPATH / OBJECTPATH methods.
+
+        Raises:
+            ValueError: on a missing required query param or an unknown
+                json_query_method.
+        """
+        if self.msg.json_query_method == "NAMEKEY":
+            return JsonKeyValueQuerier(self._log, self.msg.name)
+        elif self.msg.json_query_method == "JSONPATH":
+            if not self.msg.json_query_params.has_field("json_path"):
+                msg = "JSONPATH query_method requires json_query_params.json_path to be filled in %s"
+                self._log.error(msg, self.msg)
+                raise ValueError(msg)
+            return JsonPathValueQuerier(self._log, self.msg.json_query_params.json_path)
+        elif self.msg.json_query_method == "OBJECTPATH":
+            if not self.msg.json_query_params.has_field("object_path"):
+                msg = "OBJECTPATH query_method requires json_query_params.object_path to be filled in %s"
+                self._log.error(msg, self.msg)
+                raise ValueError(msg)
+            return ObjectPathValueQuerier(self._log, self.msg.json_query_params.object_path)
+        else:
+            msg = "Unknown JSON query method: %s" % self.json_query_method
+            self._log.error(msg)
+            raise ValueError(msg)
+
+    @property
+    def current_value(self):
+        return self._current_value
+
+    @property
+    def msg(self):
+        # Returns the underlying message; when a value has been polled,
+        # also writes it into the type-appropriate value_* field
+        # (mutates the stored message in place).
+        msg = self._vnfr_mon_param_msg
+        value_type = msg.value_type
+
+        if self._current_value is None:
+            return msg
+
+        if value_type == "INT":
+            msg.value_integer = self._current_value
+
+        elif value_type == "DECIMAL":
+            msg.value_decimal = self._current_value
+
+        elif value_type == "STRING":
+            msg.value_string = self._current_value
+
+        else:
+            self._log.debug("Unknown value_type: %s", value_type)
+
+        return msg
+
+    @property
+    def path(self):
+        # The http-endpoint path this param is sampled from.
+        return self.msg.http_endpoint_ref
+
+    @property
+    def value_type(self):
+        return self.msg.value_type
+
+    @property
+    def json_query_method(self):
+        return self.msg.json_query_method
+
+    @property
+    def json_path(self):
+        # NOTE(review): reads 'json_path_params' while the rest of this
+        # class uses 'json_query_params' — likely a typo; confirm field name.
+        return self.msg.json_path_params.json_path
+
+    @property
+    def name(self):
+        return self.msg.name
+
+    def extract_value_from_response(self, response_msg):
+        """Query response_msg, convert the result, and cache it in
+        _current_value. Extraction failures are logged, not raised, so
+        one bad poll keeps the previous value."""
+        if self._json_querier is None:
+            self._log.warning("json querier is not created.  Cannot extract value form response.")
+            return
+
+        try:
+            value = self._json_querier.query(response_msg)
+            converted_value = self._value_converter.convert(value)
+        except MonitoringParamError as e:
+            self._log.warning("Failed to extract value from json response: %s", str(e))
+            return
+        else:
+            self._current_value = converted_value
+
+
+class EndpointMonParamsPoller(object):
+    """Periodically polls one HTTPEndpoint and feeds the response to all
+    MonitoringParams bound to it, notifying a subscriber on each cycle."""
+
+    # NOTE(review): unused in the visible code — HTTPEndpoint._poll
+    # hard-codes timeout=10; consider wiring this through.
+    REQUEST_TIMEOUT_SECS = 10
+
+    def __init__(self, log, loop, endpoint, mon_params, on_update_cb=None):
+        self._log = log
+        self._loop = loop
+        self._endpoint = endpoint
+        self._mon_params = mon_params
+        self._on_update_cb = on_update_cb
+
+        # asyncio task running _poll_loop; None while stopped.
+        self._poll_task = None
+
+    @property
+    def poll_interval(self):
+        return self._endpoint.poll_interval
+
+    def _get_mon_param_msgs(self):
+        return [mon_param.msg for mon_param in self._mon_params]
+
+    def _notify_subscriber(self):
+        # Optional callback: silently skipped when unset.
+        if self._on_update_cb is None:
+             return
+
+        self._on_update_cb(self._get_mon_param_msgs())
+
+    def _apply_response_to_mon_params(self, response_msg):
+        for mon_param in self._mon_params:
+            mon_param.extract_value_from_response(response_msg)
+
+        self._notify_subscriber()
+
+    @asyncio.coroutine
+    def _poll_loop(self):
+        """Poll forever: fetch, apply, sleep poll_interval; exits only on
+        task cancellation."""
+        self._log.debug("Starting http endpoint %s poll loop", self._endpoint.url)
+        while True:
+            try:
+                response = yield from self._endpoint.poll()
+                self._apply_response_to_mon_params(response)
+            except concurrent.futures.CancelledError as e:
+                return
+
+            # NOTE(review): cancellation during this sleep is not caught
+            # above — confirm the task's cancel propagates as intended.
+            yield from asyncio.sleep(self.poll_interval, loop=self._loop)
+
+    def start(self):
+        """Start the poll loop task; no-op if already running."""
+        self._log.debug("Got start request for endpoint poller: %s",
+                        self._endpoint.url)
+        if self._poll_task is not None:
+            return
+        self._poll_task = self._loop.create_task(self._poll_loop())
+
+    def stop(self):
+        """Cancel the poll loop task; no-op if not running."""
+        self._log.debug("Got stop request for endpoint poller: %s",
+                        self._endpoint.url)
+        if self._poll_task is None:
+            return
+
+        self._poll_task.cancel()
+
+        self._poll_task = None
+
+
+class VnfMonitoringParamsController(object):
+    """Owns all monitoring params of one VNFR: builds HTTPEndpoints from
+    the endpoint descriptors, groups params per endpoint path, and runs
+    one EndpointMonParamsPoller per endpoint."""
+
+    def __init__(self, log, loop, vnfr_id, management_ip,
+                 http_endpoint_msgs, monitoring_param_msgs,
+                 on_update_cb=None):
+        self._log = log
+        self._loop = loop
+        self._vnfr_id = vnfr_id
+        self._management_ip = management_ip
+        self._http_endpoint_msgs = http_endpoint_msgs
+        self._monitoring_param_msgs = monitoring_param_msgs
+
+        self._on_update_cb = on_update_cb
+        self._endpoints = self._create_endpoints()
+        self._mon_params = self._create_mon_params()
+
+        self._endpoint_mon_param_map = self._create_endpoint_mon_param_map(
+                self._endpoints, self._mon_params
+                )
+        self._endpoint_pollers = self._create_endpoint_pollers(self._endpoint_mon_param_map)
+
+    def _create_endpoints(self):
+        """Map endpoint path -> HTTPEndpoint (all use the management IP)."""
+        path_endpoint_map = {}
+        for ep_msg in self._http_endpoint_msgs:
+            endpoint = HTTPEndpoint(
+                    self._log,
+                    self._loop,
+                    self._management_ip,
+                    ep_msg,
+                    )
+            path_endpoint_map[endpoint.path] = endpoint
+
+        return path_endpoint_map
+
+    def _create_mon_params(self):
+        """Map param id -> MonitoringParam."""
+        mon_params = {}
+        for mp_msg in self._monitoring_param_msgs:
+            mon_params[mp_msg.id] = MonitoringParam(
+                    self._log,
+                    mp_msg,
+                    )
+
+        return mon_params
+
+    def _create_endpoint_mon_param_map(self, endpoints, mon_params):
+        """Group params by endpoint via mp.path (http_endpoint_ref).
+        A param referencing an unknown path raises KeyError here."""
+        ep_mp_map = collections.defaultdict(list)
+        for mp in mon_params.values():
+            endpoint = endpoints[mp.path]
+            ep_mp_map[endpoint].append(mp)
+
+        return ep_mp_map
+
+    def _create_endpoint_pollers(self, ep_mp_map):
+        """One poller per endpoint, all sharing the on_update callback."""
+        pollers = []
+
+        for endpoint, mon_params in ep_mp_map.items():
+            poller = EndpointMonParamsPoller(
+                    self._log,
+                    self._loop,
+                    endpoint,
+                    mon_params,
+                    self._on_update_cb
+                    )
+
+            pollers.append(poller)
+
+        return pollers
+
+    @property
+    def msgs(self):
+        # Current monitoring-param messages (values filled when polled).
+        msgs = []
+        for mp in self.mon_params:
+            msgs.append(mp.msg)
+
+        return msgs
+
+    @property
+    def mon_params(self):
+        return list(self._mon_params.values())
+
+    @property
+    def endpoints(self):
+        return list(self._endpoints.values())
+
+    def start(self):
+        """ Start monitoring """
+        self._log.debug("Starting monitoring of VNF id: %s", self._vnfr_id)
+        for poller in self._endpoint_pollers:
+            poller.start()
+
+    def stop(self):
+        """ Stop monitoring """
+        self._log.debug("Stopping monitoring of VNF id: %s", self._vnfr_id)
+        for poller in self._endpoint_pollers:
+            poller.stop()
+
+
+class VnfMonitorDtsHandler(mano_dts.DtsHandler):
+    """ VNF monitoring class """
+    # List of list: So we need to register for the list in the deepest level
+    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:monitoring-param"
+
+    @classmethod
+    def from_vnf_data(cls, tasklet, vnfr_msg, vnfd_msg):
+        """Convenience constructor from a tasklet plus the VNFR message
+        (id, mgmt IP) and its VNFD (param and endpoint descriptors)."""
+        handler = cls(tasklet.log, tasklet.dts, tasklet.loop,
+                vnfr_msg.id, vnfr_msg.mgmt_interface.ip_address,
+                vnfd_msg.monitoring_param, vnfd_msg.http_endpoint)
+
+        return handler
+
+    def __init__(self, log, dts, loop, vnfr_id, mgmt_ip, params, endpoints):
+        super().__init__(log, dts, loop)
+
+        self._mgmt_ip = mgmt_ip
+        self._vnfr_id = vnfr_id
+
+        # Re-create the VNFD-level descriptors as VNFR-namespace messages
+        # so they can be published under the VNFR catalog.
+        mon_params = []
+        for mon_param in params:
+            param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict(
+                    mon_param.as_dict()
+                    )
+            mon_params.append(param)
+
+        http_endpoints = []
+        for endpoint in endpoints:
+            endpoint = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint.from_dict(
+                    endpoint.as_dict()
+                    )
+            http_endpoints.append(endpoint)
+
+        self.log.debug("Creating monitoring param controller")
+        self.log.debug(" - Endpoints: %s", http_endpoints)
+        self.log.debug(" - Monitoring Params: %s", mon_params)
+
+        self._mon_param_controller = VnfMonitoringParamsController(
+                self.log,
+                self.loop,
+                self._vnfr_id,
+                self._mgmt_ip,
+                http_endpoints,
+                mon_params,
+                self.on_update_mon_params
+                )
+
+    def on_update_mon_params(self, mon_param_msgs):
+        """Publish each refreshed param at its id-specific xpath."""
+        for param_msg in mon_param_msgs:
+            self.reg.update_element(
+                    self.xpath(param_msg.id),
+                    param_msg,
+                    rwdts.XactFlag.ADVISE
+                   )
+
+    def start(self):
+        """Start the per-endpoint pollers for this VNFR."""
+        self._mon_param_controller.start()
+
+    def stop(self):
+        """Deregister from DTS and stop the pollers."""
+        self.deregister()
+        self._mon_param_controller.stop()
+
+    def xpath(self, param_id=None):
+        """ Monitoring params xpath """
+        return("D,/vnfr:vnfr-catalog" +
+               "/vnfr:vnfr[vnfr:id='{}']".format(self._vnfr_id) +
+               "/vnfr:monitoring-param" +
+               ("[vnfr:id='{}']".format(param_id) if param_id else ""))
+
+    @property
+    def msg(self):
+        """ The message with the monitoing params """
+        return self._mon_param_controller.msgs
+
+    def __del__(self):
+        # NOTE(review): stop() from __del__ touches self.reg and the
+        # controller during GC — may raise if __init__ failed part-way.
+        self.stop()
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register with dts """
+
+        self.reg = yield from self.dts.register(xpath=self.xpath(),
+                  flags=rwdts.Flag.PUBLISHER|rwdts.Flag.CACHE|rwdts.Flag.NO_PREP_READ)
+
+        assert self.reg is not None
+
+    def deregister(self):
+        """ de-register with dts """
+        if self.reg is not None:
+            self.log.debug("Deregistering path %s, regh = %s",
+                            VnfMonitorDtsHandler.XPATH,
+                            self.reg)
+            self.reg.deregister()
+            self.reg = None
+            # NOTE(review): _vnfr is cleared here but never assigned
+            # anywhere else in this class — confirm it is needed.
+            self._vnfr = None
diff --git a/rwlaunchpad/plugins/rwmonparam/rwmonparam.py b/rwlaunchpad/plugins/rwmonparam/rwmonparam.py
new file mode 100644
index 0000000..571c45d
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonparam/rwmonparam.py
@@ -0,0 +1,25 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwmonparam
+
+class Tasklet(rift.tasklets.rwmonparam.MonitoringParameterTasklet):
+    # Thin shim: re-exports the site-packages implementation under the
+    # "Tasklet" name the plugin loader expects (RIFT-6485 workaround,
+    # see the comment above).
+    pass
diff --git a/rwlaunchpad/plugins/rwmonparam/test/utest_aggregator.py b/rwlaunchpad/plugins/rwmonparam/test/utest_aggregator.py
new file mode 100644
index 0000000..f7b8e88
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonparam/test/utest_aggregator.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import base64
+import logging
+import os
+import sys
+import tornado.escape
+import tornado.platform.asyncio
+import tornado.testing
+import tornado.web
+import unittest
+import xmlrunner
+
+import rift.tasklets.rwmonparam.aggregator as aggregator
+
+
+from gi.repository import VnfrYang
+
+logger = logging.getLogger("mon_params_test.py")
+
+
+class TestAggregator(unittest.TestCase):
+
+    def test_int_aggregator(self):
+        int_agg = aggregator.IntValueAggregator("SUM", [1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_integer", 6))
+
+        int_agg = aggregator.IntValueAggregator("AVERAGE", [1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_integer", 2))
+
+        int_agg = aggregator.IntValueAggregator("MAXIMUM", [1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_integer", 3))
+
+        int_agg = aggregator.IntValueAggregator("MINIMUM", [1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_integer", 1))
+
+        int_agg = aggregator.IntValueAggregator("COUNT", [1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_integer", 3))
+
+    def test_decimal_aggregator(self):
+        int_agg = aggregator.DecimalValueAggregator("SUM", [1.1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_decimal", 6.1))
+
+        int_agg = aggregator.DecimalValueAggregator("AVERAGE", [1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_decimal", 2.0))
+
+        int_agg = aggregator.DecimalValueAggregator("MAXIMUM", [1, 2, 3.3])
+        self.assertEqual(int_agg.aggregate(), ("value_decimal", 3.3))
+
+        int_agg = aggregator.DecimalValueAggregator("MINIMUM", [1.1, 2, 3.3])
+        self.assertEqual(int_agg.aggregate(), ("value_decimal", 1.1))
+
+        int_agg = aggregator.DecimalValueAggregator("COUNT", [1.1, 2, 3.3])
+        self.assertEqual(int_agg.aggregate(), ("value_decimal", 3))
+
+
+def main(argv=sys.argv[1:]):
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(
+            argv=[__file__] + argv,
+            testRunner=xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+            )
+
+if __name__ == '__main__':
+    main()
+
diff --git a/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params.py b/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params.py
new file mode 100755
index 0000000..a0817d7
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params.py
@@ -0,0 +1,933 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import base64
+import logging
+import os
+import sys
+import tornado.escape
+import tornado.platform.asyncio
+import tornado.testing
+import tornado.web
+import unittest
+import xmlrunner
+
+import rift.tasklets.rwmonparam.vnfr_core as mon_params
+
+
+from gi.repository import VnfrYang
+
+logger = logging.getLogger("mon_params_test.py")
+
+
+class AsyncioTornadoTest(tornado.testing.AsyncHTTPTestCase):
+    """Tornado HTTP test case driving tornado on the asyncio event loop."""
+
+    def setUp(self):
+        # Capture the asyncio loop before tornado's setUp installs its
+        # ioloop wrapper, so tests can run coroutines on it directly.
+        self._loop = asyncio.get_event_loop()
+        super().setUp()
+
+    def get_new_ioloop(self):
+        # Bridge tornado's IOLoop onto the asyncio main loop.
+        return tornado.platform.asyncio.AsyncIOMainLoop()
+
+
+class MonParamsPingStatsTest(AsyncioTornadoTest):
+    ping_path = r"/api/v1/ping/stats"
+    ping_response = {
+            'ping-request-tx-count': 5,
+            'ping-response-rx-count': 10
+            }
+
+    mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam()
+    mon_param_msg.from_dict({
+            'id': '1',
+            'name': 'ping-request-tx-count',
+            'json_query_method': "NAMEKEY",
+            'http_endpoint_ref': ping_path,
+            'value_type': "INT",
+            'description': 'no of ping requests',
+            'group_tag': 'Group1',
+            'widget_type': 'COUNTER',
+            'units': 'packets'
+            })
+
+    endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint()
+    endpoint_msg.from_dict({
+        'path': ping_path,
+        'polling_interval_secs': 1,
+        'username': 'admin',
+        'password': 'password',
+        'headers': [{'key': 'TEST_KEY', 'value': 'TEST_VALUE'}],
+        })
+
+    def create_endpoint(self, endpoint_msg):
+        self.mon_port = self.get_http_port()
+        endpoint = mon_params.HTTPEndpoint(
+                logger,
+                self._loop,
+                "127.0.0.1",
+                self.endpoint_msg,
+                )
+        # For each creation, update the descriptor as well
+        endpoint_msg.port = self.mon_port
+
+        return endpoint
+
+    def create_mon_param(self):
+        return mon_params.MonitoringParam(logger, self.mon_param_msg)
+
+    def get_app(self):
+        class PingStatsHandler(tornado.web.RequestHandler):
+            def get(this):
+                test_header = this.request.headers.get('TEST_KEY')
+                if test_header is None or test_header != 'TEST_VALUE':
+                    this.set_status(401)
+                    this.finish()
+                    return None
+
+                auth_header = this.request.headers.get('Authorization')
+                if auth_header is None or not auth_header.startswith('Basic '):
+                    this.set_status(401)
+                    this.set_header('WWW-Authenticate', 'Basic realm=Restricted')
+                    this._transforms = []
+                    this.finish()
+                    return None
+
+                auth_header = auth_header.encode('ascii')
+                auth_decoded = base64.decodestring(auth_header[6:]).decode('ascii')
+                login, password = auth_decoded.split(':', 2)
+                login = login.encode('ascii')
+                password = password.encode('ascii')
+                is_auth = (login == b"admin" and password == b"password")
+
+                if not is_auth:
+                    this.set_status(401)
+                    this.set_header('WWW-Authenticate', 'Basic realm=Restricted')
+                    this._transforms = []
+                    this.finish()
+                    return None
+
+                this.write(self.ping_response)
+
+        return tornado.web.Application([
+            (self.ping_path, PingStatsHandler),
+            ])
+
+    def test_value_convert(self):
+        float_con = mon_params.ValueConverter("DECIMAL")
+        int_con = mon_params.ValueConverter("INT")
+        text_con = mon_params.ValueConverter("STRING")
+
+        a = float_con.convert("1.23")
+        self.assertEqual(a, 1.23)
+
+        a = float_con.convert(1)
+        self.assertEqual(a, float(1))
+
+        t = text_con.convert(1.23)
+        self.assertEqual(t, "1.23")
+
+        t = text_con.convert("asdf")
+        self.assertEqual(t, "asdf")
+
+        i = int_con.convert(1.23)
+        self.assertEqual(i, 1)
+
+    def test_json_key_value_querier(self):
+        kv_querier = mon_params.JsonKeyValueQuerier(logger, "ping-request-tx-count")
+        value = kv_querier.query(tornado.escape.json_encode(self.ping_response))
+        self.assertEqual(value, 5)
+
+    def test_json_path_value_querier(self):
+        kv_querier = mon_params.JsonPathValueQuerier(logger, '$.ping-request-tx-count')
+        value = kv_querier.query(tornado.escape.json_encode(self.ping_response))
+        self.assertEqual(value, 5)
+
+    def test_object_path_value_querier(self):
+        kv_querier = mon_params.ObjectPathValueQuerier(logger, "$.*['ping-request-tx-count']")
+        value = kv_querier.query(tornado.escape.json_encode(self.ping_response))
+        self.assertEqual(value, 5)
+
+    def test_endpoint(self):
+        @asyncio.coroutine
+        def run_test():
+            endpoint = self.create_endpoint(self.endpoint_msg)
+            resp = yield from endpoint.poll()
+            resp_json = tornado.escape.json_decode(resp)
+            self.assertEqual(resp_json["ping-request-tx-count"], 5)
+            self.assertEqual(resp_json["ping-response-rx-count"], 10)
+
+        self._loop.run_until_complete(
+                asyncio.wait_for(run_test(), 10, loop=self._loop)
+                )
+
+    def test_mon_param(self):
+        a = self.create_mon_param()
+        a.extract_value_from_response(tornado.escape.json_encode(self.ping_response))
+        self.assertEqual(a.current_value, 5)
+        self.assertEqual(a.msg.value_integer, 5)
+
+    def test_endpoint_poller(self):
+        endpoint = self.create_endpoint(self.endpoint_msg)
+        mon_param = self.create_mon_param()
+        poller = mon_params.EndpointMonParamsPoller(
+                logger, self._loop, endpoint, [mon_param],
+                )
+        poller.start()
+
+        self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop))
+        self.assertEqual(mon_param.current_value, 5)
+
+        poller.stop()
+
+    def test_params_controller(self):
+        new_port = self.get_http_port()
+        # Update port after new port is initialized
+        self.endpoint_msg.port = new_port
+        ctrl = mon_params.VnfMonitoringParamsController(
+                logger, self._loop, "1", "127.0.0.1", 
+                [self.endpoint_msg], [self.mon_param_msg],
+                )
+        ctrl.start()
+
+        self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop))
+
+        ctrl.stop()
+
+        self.assertEqual(1, len(ctrl.mon_params))
+        mon_param = ctrl.mon_params[0]
+        self.assertEqual(mon_param.current_value, 5)
+
+
+class AsyncioTornadoHttpsTest(tornado.testing.AsyncHTTPSTestCase):
+    """Tornado HTTPS test case driving tornado on the asyncio event loop."""
+
+    def setUp(self):
+        # Capture the asyncio loop before tornado's setUp installs its
+        # ioloop wrapper, so tests can run coroutines on it directly.
+        self._loop = asyncio.get_event_loop()
+        super().setUp()
+
+    def get_new_ioloop(self):
+        # Bridge tornado's IOLoop onto the asyncio main loop.
+        return tornado.platform.asyncio.AsyncIOMainLoop()
+
+
+class MonParamsPingStatsHttpsTest(AsyncioTornadoHttpsTest):
+    ping_path = r"/api/v1/ping/stats"
+    ping_response = {
+            'ping-request-tx-count': 5,
+            'ping-response-rx-count': 10
+            }
+
+    mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam()
+    mon_param_msg.from_dict({
+            'id': '1',
+            'name': 'ping-request-tx-count',
+            'json_query_method': "NAMEKEY",
+            'http_endpoint_ref': ping_path,
+            'value_type': "INT",
+            'description': 'no of ping requests',
+            'group_tag': 'Group1',
+            'widget_type': 'COUNTER',
+            'units': 'packets'
+            })
+
+    endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint()
+    endpoint_msg.from_dict({
+        'path': ping_path,
+        'https': 'true',
+        'polling_interval_secs': 1,
+        'username': 'admin',
+        'password': 'password',
+        'headers': [{'key': 'TEST_KEY', 'value': 'TEST_VALUE'}],
+        })
+
+    def create_endpoint(self, endpoint_msg):
+        self.mon_port = self.get_http_port()
+        endpoint = mon_params.HTTPEndpoint(
+                logger,
+                self._loop,
+                "127.0.0.1",
+                self.endpoint_msg,
+                )
+        # For each creation, update the descriptor as well
+        endpoint_msg.port = self.mon_port
+
+        return endpoint
+
+    def create_mon_param(self):
+        return mon_params.MonitoringParam(logger, self.mon_param_msg)
+
+    def get_app(self):
+        class PingStatsHandler(tornado.web.RequestHandler):
+            def get(this):
+                test_header = this.request.headers.get('TEST_KEY')
+                if test_header is None or test_header != 'TEST_VALUE':
+                    this.set_status(401)
+                    this.finish()
+                    return None
+
+                auth_header = this.request.headers.get('Authorization')
+                if auth_header is None or not auth_header.startswith('Basic '):
+                    this.set_status(401)
+                    this.set_header('WWW-Authenticate', 'Basic realm=Restricted')
+                    this._transforms = []
+                    this.finish()
+                    return None
+
+                auth_header = auth_header.encode('ascii')
+                auth_decoded = base64.decodestring(auth_header[6:]).decode('ascii')
+                login, password = auth_decoded.split(':', 2)
+                login = login.encode('ascii')
+                password = password.encode('ascii')
+                is_auth = (login == b"admin" and password == b"password")
+
+                if not is_auth:
+                    this.set_status(401)
+                    this.set_header('WWW-Authenticate', 'Basic realm=Restricted')
+                    this._transforms = []
+                    this.finish()
+                    return None
+
+                this.write(self.ping_response)
+
+        return tornado.web.Application([
+            (self.ping_path, PingStatsHandler),
+            ])
+
+    def test_value_convert(self):
+        float_con = mon_params.ValueConverter("DECIMAL")
+        int_con = mon_params.ValueConverter("INT")
+        text_con = mon_params.ValueConverter("STRING")
+
+        a = float_con.convert("1.23")
+        self.assertEqual(a, 1.23)
+
+        a = float_con.convert(1)
+        self.assertEqual(a, float(1))
+
+        t = text_con.convert(1.23)
+        self.assertEqual(t, "1.23")
+
+        t = text_con.convert("asdf")
+        self.assertEqual(t, "asdf")
+
+        i = int_con.convert(1.23)
+        self.assertEqual(i, 1)
+
+    def test_json_key_value_querier(self):
+        kv_querier = mon_params.JsonKeyValueQuerier(logger, "ping-request-tx-count")
+        value = kv_querier.query(tornado.escape.json_encode(self.ping_response))
+        self.assertEqual(value, 5)
+
+    def test_endpoint(self):
+        @asyncio.coroutine
+        def run_test():
+            endpoint = self.create_endpoint(self.endpoint_msg)
+            resp = yield from endpoint.poll()
+            resp_json = tornado.escape.json_decode(resp)
+            self.assertEqual(resp_json["ping-request-tx-count"], 5)
+            self.assertEqual(resp_json["ping-response-rx-count"], 10)
+
+        self._loop.run_until_complete(
+                asyncio.wait_for(run_test(), 10, loop=self._loop)
+                )
+
+    def test_mon_param(self):
+        a = self.create_mon_param()
+        a.extract_value_from_response(tornado.escape.json_encode(self.ping_response))
+        self.assertEqual(a.current_value, 5)
+        self.assertEqual(a.msg.value_integer, 5)
+
+    def test_endpoint_poller(self):
+        endpoint = self.create_endpoint(self.endpoint_msg)
+        mon_param = self.create_mon_param()
+        poller = mon_params.EndpointMonParamsPoller(
+                logger, self._loop, endpoint, [mon_param],
+                )
+        poller.start()
+
+        self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop))
+        self.assertEqual(mon_param.current_value, 5)
+
+        poller.stop()
+
+    def test_params_controller(self):
+        new_port = self.get_http_port()
+        # Update port after new port is initialized
+        self.endpoint_msg.port = new_port
+        ctrl = mon_params.VnfMonitoringParamsController(
+                logger, self._loop, "1", "127.0.0.1", 
+                [self.endpoint_msg], [self.mon_param_msg],
+                )
+        ctrl.start()
+
+        self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop))
+
+        ctrl.stop()
+
+        self.assertEqual(1, len(ctrl.mon_params))
+        mon_param = ctrl.mon_params[0]
+        self.assertEqual(mon_param.current_value, 5)
+
+
+class VRouterStatsTest(unittest.TestCase):
+    """ObjectPath querying against a vrouter-style CPU stats document."""
+
+    system_response = {
+        "system": {
+            "cpu": [
+                {
+                    "usage": 2.35,
+                    "cpu": "all"
+                },
+                {
+                    "usage": 5.35,
+                    "cpu": "1"
+                }
+            ]
+        }
+    }
+
+    def test_object_path_value_querier(self):
+        # Select the aggregate ("all") CPU entry and read its usage.
+        kv_querier = mon_params.ObjectPathValueQuerier(logger, "$.system.cpu[@.cpu is 'all'].usage")
+        value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+        self.assertEqual(value, 2.35)
+
+
+class TrafsinkStatsTest(unittest.TestCase):
+    """ObjectPath querying against a trafsink port-state document.
+
+    The fixture mirrors rw-vnf-base-opdata:port-state output for a
+    single trafsink port; the test selects the port by name and reads
+    one counter.
+    """
+    system_response = {
+       "rw-vnf-base-opdata:port-state": [
+         {
+           "ip": [
+             {
+               "address": "12.0.0.3/24"
+             }
+           ],
+           "rw-trafgen-data:trafgen-info": {
+             "src_l4_port": 1234,
+             "dst_l4_port": 5678,
+             "dst_ip_address": "192.168.1.1",
+             "tx_state": "Off",
+             "dst_mac_address": "00:00:00:00:00:00",
+             "tx_mode": "single-template",
+             "packet-count": 0,
+             "tx-cycles": 5478,
+             "tx_burst": 16,
+             "src_ip_address": "192.168.0.1",
+             "pkt_size": 64,
+             "src_mac_address": "fa:16:3e:07:b1:52",
+             "descr-string": "",
+             "tx_rate": 100
+           },
+           "counters": {
+             "input-errors": 0,
+             "output-bytes": 748,
+             "input-pause-xoff-pkts": 0,
+             "input-badcrc-pkts": 0,
+             "input-bytes": 62,
+             "rx-rate-mbps": 9576,
+             "output-pause-xoff-pkts": 0,
+             "input-missed-pkts": 0,
+             "input-packets": 1,
+             "output-errors": 0,
+             "tx-rate-mbps": 0,
+             "input-pause-xon-pkts": 0,
+             "output-pause-xon-pkts": 0,
+             "tx-rate-pps": 0,
+             "input-mcast-pkts": 0,
+             "rx-rate-pps": 0,
+             "output-packets": 6,
+             "input-nombuf-pkts": 0
+           },
+           "info": {
+             "numa-socket": 0,
+             "transmit-queues": 1,
+             "privatename": "eth_uio:pci=0000:00:04.0",
+             "duplex": "full-duplex",
+             "virtual-fabric": "No",
+             "link-state": "up",
+             "rte-port-id": 0,
+             "fastpath-instance": 1,
+             "id": 0,
+             "app-name": "rw_trafgen",
+             "speed": 10000,
+             "receive-queues": 1,
+             "descr-string": "",
+             "mac": "fa:16:3e:07:b1:52"
+           },
+           "portname": "trafsink_vnfd/cp0",
+           "queues": {
+             "rx-queue": [
+               {
+                 "packets": 1,
+                 "bytes-MB": 0,
+                 "qid": 0,
+                 "rate-mbps": 0,
+                 "rate-pps": 0
+               }
+             ],
+             "tx-queue": [
+               {
+                 "bytes-MB": 0,
+                 "packets": 6,
+                 "rate-pps": 0,
+                 "errors": 0,
+                 "qid": 0,
+                 "rate-mbps": 0
+               }
+             ]
+           }
+         }
+       ]
+     }
+
+    def test_object_path_value_querier(self):
+        # Filter port-state entries by portname, then read the quoted
+        # (hyphenated) counter key.
+        kv_querier = mon_params.ObjectPathValueQuerier(logger, "$..*[@.portname is 'trafsink_vnfd/cp0'].counters.'rx-rate-mbps'")
+        value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+        self.assertEqual(value, 9576)
+
+class IkeStatsTest(unittest.TestCase):
+    system_response = {
+      "rw-ipsec:ipsec-service-statistics": [
+    {
+      "name": "client1",
+      "statistics": {
+        "esp": {
+          "rx-bytes": 0,
+          "rx-packets": 0,
+          "tx-bytes": 0,
+          "tx-packets": 0
+        },
+        "rekey": {
+          "total": 3321,
+          "rate": 132,
+          "instantaneous-rate": 2
+        },
+        "state": {
+          "ike-sas": 10,
+          "threads-in-use": 5,
+          "swanctl-dir": "\/tmp\/strongswan4x3dni"
+        }
+      }
+    },
+    {
+      "name": "client0",
+      "statistics": {
+        "esp": {
+          "rx-bytes": 0,
+          "rx-packets": 0,
+          "tx-bytes": 0,
+          "tx-packets": 0
+        },
+        "rekey": {
+          "total": 3345,
+          "rate": 0,
+          "instantaneous-rate": 0
+        },
+        "state": {
+          "ike-sas": 50,
+          "threads-in-use": 5,
+          "swanctl-dir": "\/tmp\/strongswann21td3"
+        }
+      }
+    }
+  ]
+     }
+
+
+    def test_object_path_value_querier(self):
+        kv_querier = mon_params.ObjectPathValueQuerier(logger, "$..*[@.name is 'client1'].statistics.rekey.rate")
+        value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+        self.assertEqual(value, 132)
+        kv_querier = mon_params.ObjectPathValueQuerier(logger, "$..*[@.name is 'client1'].statistics.state.'ike-sas'")
+        value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+        self.assertEqual(value, 10)
+
+
+
+
+class PortLatencyTest(unittest.TestCase):
+    system_response = {
+  "rw-vnf-base-opdata:port-state": [
+    {
+      "info": {
+        "fastpath-instance": 1,
+        "duplex": "full-duplex",
+        "link-state": "up",
+        "lport-id": 81931,
+        "mtu": 1500,
+        "descr-string": "",
+        "transmit-queues": 1,
+        "mac": "fa:16:3e:c7:4a:b8",
+        "admin-state": "up",
+        "rte-port-id": 0,
+        "numa-socket": 0,
+        "app-name": "rw_trafgen",
+        "speed": 10000,
+        "virtual-fabric": "No",
+        "id": 0,
+        "receive-queues": 1,
+        "privatename": "eth_uio:pci=0000:00:04.0"
+      },
+      "rw-trafgen-data:trafgen-info": {
+        "maximum-latency": 124412,
+        "latency-distribution": [
+          {
+            "range-end": 100,
+            "range-start": 0,
+            "packets": 0
+          },
+          {
+            "range-end": 200,
+            "range-start": 101,
+            "packets": 0
+          },
+          {
+            "range-end": 300,
+            "range-start": 201,
+            "packets": 0
+          },
+          {
+            "range-end": 400,
+            "range-start": 301,
+            "packets": 0
+          },
+          {
+            "range-end": 500,
+            "range-start": 401,
+            "packets": 0
+          },
+          {
+            "range-end": 600,
+            "range-start": 501,
+            "packets": 0
+          },
+          {
+            "range-end": 700,
+            "range-start": 601,
+            "packets": 0
+          },
+          {
+            "range-end": 800,
+            "range-start": 701,
+            "packets": 0
+          },
+          {
+            "range-end": 900,
+            "range-start": 801,
+            "packets": 0
+          },
+          {
+            "range-end": 1000,
+            "range-start": 901,
+            "packets": 0
+          },
+          {
+            "range-end": 1100,
+            "range-start": 1001,
+            "packets": 0
+          },
+          {
+            "range-end": 1200,
+            "range-start": 1101,
+            "packets": 0
+          },
+          {
+            "range-end": 1300,
+            "range-start": 1201,
+            "packets": 0
+          },
+          {
+            "range-end": 1400,
+            "range-start": 1301,
+            "packets": 0
+          },
+          {
+            "range-end": 1500,
+            "range-start": 1401,
+            "packets": 0
+          },
+          {
+            "range-end": 0,
+            "range-start": 1501,
+            "packets": 1513641
+          }
+        ],
+        "descr-string": "",
+        "tx_mode": "range-template",
+        "minimum-latency": 1928,
+        "pkt_size": 512,
+        "tx_rate": 100,
+        "tx-cycles": 35206,
+        "src_ip_address": "12.0.0.3",
+        "src_l4_port": 10000,
+        "dst_ip_address": "12.0.0.2",
+        "mean-deviation": 4500,
+        "queue": [
+          {
+            "maximum": 124412,
+            "num-packets": 1513641,
+            "average": 12112,
+            "mean-deviation": 4500,
+            "qid": 0,
+            "minimum": 1928
+          }
+        ],
+        "packet-count": 0,
+        "average-latency": 12112,
+        "dst_l4_port": 5678,
+        "tx_state": "On",
+        "tx_burst": 16
+      },
+      "counters": {
+        "tx-rate-pps": 139630,
+        "rx-rate-mbps": 232,
+        "tx-rate-mbps": 589,
+        "output-packets": 49285239,
+        "input-missed-pkts": 0,
+        "output-errors": 0,
+        "input-nombuf-pkts": 0,
+        "input-errors": 0,
+        "input-mcast-pkts": 0,
+        "output-bytes": 26022584932,
+        "input-packets": 22537250,
+        "input-bytes": 11899650400,
+        "rx-rate-pps": 55158
+      },
+      "portname": "trafgencp0",
+      "ip": [
+        {
+          "address": "12.0.0.3\/24"
+        }
+      ],
+      "queues": {
+        "rx-queue": [
+          {
+            "packets": 22537250,
+            "bytes-MB": 95197,
+            "rate-mbps": 232,
+            "qid": 0,
+            "rate-pps": 55158
+          }
+        ],
+        "tx-queue": [
+          {
+            "bytes-MB": 208180,
+            "packets": 49285239,
+            "errors": 0,
+            "rate-mbps": 589,
+            "qid": 0,
+            "rate-pps": 139630
+          }
+        ]
+      },
+      "extended-stats": {
+        "xstats": [
+          {
+            "name": "rx_good_packets",
+            "value": 22555470
+          },
+          {
+            "name": "tx_good_packets",
+            "value": 49337664
+          },
+          {
+            "name": "rx_good_bytes",
+            "value": 11458161160
+          },
+          {
+            "name": "tx_good_bytes",
+            "value": 25063512052
+          },
+          {
+            "name": "rx_errors",
+            "value": 0
+          },
+          {
+            "name": "tx_errors",
+            "value": 0
+          },
+          {
+            "name": "rx_mbuf_allocation_errors",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_packets",
+            "value": 22555470
+          },
+          {
+            "name": "rx_q0_bytes",
+            "value": 11458161160
+          },
+          {
+            "name": "rx_q0_errors",
+            "value": 0
+          },
+          {
+            "name": "tx_q0_packets",
+            "value": 49337664
+          },
+          {
+            "name": "tx_q0_bytes",
+            "value": 25063512052
+          },
+          {
+            "name": "rx_q0_good_packets",
+            "value": 22555470
+          },
+          {
+            "name": "rx_q0_good_bytes",
+            "value": 11458161160
+          },
+          {
+            "name": "rx_q0_multicast_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_broadcast_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_undersize_packets",
+            "value": 38
+          },
+          {
+            "name": "rx_q0_size_64_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_size_65_127_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_size_128_255_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_size_256_511_packets",
+            "value": 22555432
+          },
+          {
+            "name": "rx_q0_size_512_1023_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_size_1024_1517_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_size_1518_max_packets",
+            "value": 0
+          },
+          {
+            "name": "tx_q0_good_packets",
+            "value": 49337664
+          },
+          {
+            "name": "tx_q0_good_bytes",
+            "value": 25063512052
+          },
+          {
+            "name": "tx_q0_errors",
+            "value": 0
+          },
+          {
+            "name": "tx_q0_multicast_packets",
+            "value": 18
+          },
+          {
+            "name": "tx_q0_broadcast_packets",
+            "value": 11
+          },
+          {
+            "name": "tx_q0_undersize_packets",
+            "value": 40
+          },
+          {
+            "name": "tx_q0_size_64_packets",
+            "value": 0
+          },
+          {
+            "name": "tx_q0_size_65_127_packets",
+            "value": 5
+          },
+          {
+            "name": "tx_q0_size_128_255_packets",
+            "value": 2
+          },
+          {
+            "name": "tx_q0_size_256_511_packets",
+            "value": 49337617
+          },
+          {
+            "name": "tx_q0_size_512_1023_packets",
+            "value": 0
+          },
+          {
+            "name": "tx_q0_size_1024_1517_packets",
+            "value": 0
+          },
+          {
+            "name": "tx_q0_size_1518_max_packets",
+            "value": 0
+          }
+        ]
+      },
+      "lacp-info": {
+        "state": {
+          "distributing": "On",
+          "active": "Off",
+          "collecting": "On"
+        },
+        "counters": {
+          "marker": {
+            "rx": 0,
+            "tx": 0,
+            "errors": 0,
+            "nobuf": 0
+          },
+          "lacppdu": {
+            "rx": 0,
+            "tx": 0,
+            "errors": 0,
+            "nobuf": 0
+          }
+        }
+      }
+    }
+  ]
+    }
+
+
+    def test_object_path_value_querier(self):
+          kv_querier = mon_params.ObjectPathValueQuerier(logger, "$..*[@.portname is 'trafgencp0'].'rw-trafgen-data:trafgen-info'.pkt_size")
+          value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+          self.assertEqual(value, 512)
+          kv_querier = mon_params.ObjectPathValueQuerier(logger, "$..*[@.portname is 'trafgencp0'].'rw-trafgen-data:trafgen-info'.'average-latency'")
+          value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+          self.assertEqual(value, 12112)
+
+
+def main(argv=sys.argv[1:]):
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(
+            argv=[__file__] + argv,
+            testRunner=xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+            )
+
+if __name__ == '__main__':
+    main()
+
diff --git a/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params_dts.py b/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params_dts.py
new file mode 100644
index 0000000..680cc82
--- /dev/null
+++ b/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params_dts.py
@@ -0,0 +1,349 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import asyncio
+import itertools
+import logging
+import os
+import sys
+import unittest
+import uuid
+
+import xmlrunner
+import unittest.mock as mock
+
+from rift.tasklets.rwmonparam import vnfr_core as vnf_mon_params
+from rift.tasklets.rwmonparam import nsr_core as nsr_mon_params
+import rift.test.dts
+
+import gi
+gi.require_version('RwDtsYang', '1.0')
+from gi.repository import (
+        VnfrYang as vnfryang,
+        RwNsrYang,
+        RwLaunchpadYang as launchpadyang,
+        RwDts as rwdts,
+        RwVnfrYang,
+        RwVnfdYang,
+        RwNsdYang
+        )
+
+import utest_mon_params
+
+
+class MonParamMsgGenerator(object):
+    """Generates one canned HTTP endpoint message and a sequence of
+    monitoring-param messages for feeding the VNF monitor handler in tests."""
+    def __init__(self, num_messages=1):
+        ping_path = r"/api/v1/ping/stats"
+        self._endpoint_msg = vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint.from_dict({
+            'path': ping_path,
+            'https': 'true',
+            'polling_interval_secs': 1,
+            'username': 'admin',
+            'password': 'password',
+            'headers': [{'key': 'TEST_KEY', 'value': 'TEST_VALUE'}],
+            })
+
+        # NOTE(review): range(1, num_messages) produces num_messages - 1
+        # messages (ids "1" .. str(num_messages - 1), value_integer == id) —
+        # confirm the off-by-one is intended by callers like
+        # MonParamMsgGenerator(4), which therefore gets 3 messages.
+        self._mon_param_msgs = []
+        for i in range(1, num_messages):
+            self._mon_param_msgs.append(vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
+                'id': '%s' % i,
+                'name': 'param_num_%s' % i,
+                'json_query_method': "NAMEKEY",
+                'http_endpoint_ref': ping_path,
+                'value_type': "INT",
+                'value_integer': i,
+                'description': 'desc for param_num_%s' % i,
+                'group_tag': 'Group1',
+                'widget_type': 'COUNTER',
+                'units': 'packets'
+                })
+            )
+
+        # Iterator consumed one message at a time by next_message().
+        self._msgs = iter(self.mon_param_msgs)
+
+    @property
+    def mon_param_msgs(self):
+        return self._mon_param_msgs
+
+    @property
+    def endpoint_msgs(self):
+        # Single-element list: only one endpoint is modelled.
+        return [self._endpoint_msg]
+
+    def next_message(self):
+        # Raises StopIteration once all generated messages are consumed.
+        return next(self._msgs)
+
+
+
+class MonParamsDtsTestCase(rift.test.dts.AbstractDTSTest):
+    """DTS tests for VNF/NSR monitoring-param publishing.
+
+    Uses two DTS instances (publisher + "_sub" subscriber) and a MagicMock
+    store standing in for the descriptor/record store.  Methods whose names
+    start with "_test_" are disabled — unittest only discovers "test_*";
+    only test_nsr_monitor_publish_count currently runs.
+    """
+    @classmethod
+    def configure_schema(cls):
+        return launchpadyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        # Generous per-test timeout (seconds) for slow DTS bring-up.
+        return 240
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", test_id)
+        self.tinfo = self.new_tinfo(str(test_id))
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+
+        # Separate tasklet/DTS instance for the subscriber side.
+        self.tinfo_sub = self.new_tinfo(str(test_id) + "_sub")
+        self.dts_sub = rift.tasklets.DTS(self.tinfo_sub, self.schema, self.loop)
+
+        self.msg_gen = MonParamMsgGenerator(4)
+        self.vnf_handler = vnf_mon_params.VnfMonitorDtsHandler(
+                self.log, self.dts, self.loop, 1, "1.1.1.1",
+                self.msg_gen.mon_param_msgs, self.msg_gen.endpoint_msgs
+                )
+
+        # Default (legacy, no NSD monp aggregation) store; _test_publish
+        # rebuilds these with per-test aggregation settings.
+        store = self.setup_mock_store(aggregation_type=None,
+            monps=None,
+            legacy=True)
+
+        self.nsr_handler = nsr_mon_params.NsrMonitorDtsHandler(
+            self.log, self.dts, self.loop, store.nsr[0], [store.get_vnfr()], store)
+
+
+    def tearDown(self):
+        super().tearDown()
+
+    def setup_mock_store(self, aggregation_type, monps, legacy=False):
+        """Build a MagicMock store exposing get_vnfd/get_vnfr/get_nsr/get_nsd.
+
+        aggregation_type: NSD-level monp aggregation (e.g. "SUM"); ignored
+            when legacy is True.
+        monps: VNFR monitoring params to embed, or None.
+        legacy: when True, both VNFR monps and NSD monps are left empty.
+        """
+        store = mock.MagicMock()
+
+        mock_vnfd =  RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
+            'id': "1",
+            'monitoring_param': [
+                {'description': 'no of ping requests',
+                 'group_tag': 'Group1',
+                 'http_endpoint_ref': 'api/v1/ping/stats',
+                 'id': '1',
+                 'json_query_method': 'NAMEKEY',
+                 'name': 'ping-request-tx-count',
+                 'units': 'packets',
+                 'value_type': 'INT',
+                 'widget_type': 'COUNTER'},
+                {'description': 'no of ping responses',
+                 'group_tag': 'Group1',
+                 'http_endpoint_ref': 'api/v1/ping/stats',
+                 'id': '2',
+                 'json_query_method': 'NAMEKEY',
+                 'name': 'ping-response-rx-count',
+                 'units': 'packets',
+                 'value_type': 'INT',
+                 'widget_type': 'COUNTER'}],
+            })
+        store.get_vnfd = mock.MagicMock(return_value=mock_vnfd)
+
+        mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({
+            'id': '1',
+            'vnfd_ref': '1',
+            'monitoring_param': ([monp.as_dict() for monp in monps] if not legacy else [])
+            })
+        store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
+
+        mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
+            'ns_instance_config_ref': "1",
+            'name_ref': "Foo",
+            'constituent_vnfr_ref': [{'vnfr_id': mock_vnfr.id}],
+
+            })
+        store.get_nsr = mock.MagicMock(return_value=mock_nsr)
+        store.nsr = [mock_nsr]
+
+        # Single NSD monp aggregating both VNFD monps above.
+        monp = [{'aggregation_type': aggregation_type,
+                 'id': '1',
+                 'description': 'no of ping requests',
+                 'group_tag': 'Group1',
+                 'units': 'packets',
+                 'widget_type': 'COUNTER',
+                 'name': 'ping-request-tx-count',
+                 'value_type': 'INT',
+                 'vnfd_monitoring_param': [
+                    {'vnfd_id_ref': '1',
+                     'vnfd_monitoring_param_ref': '1'},
+                    {'vnfd_id_ref': '1',
+                     'vnfd_monitoring_param_ref': '2'}]
+                }]
+
+        mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict({
+            'id': str(uuid.uuid1()),
+            'monitoring_param': (monp if not legacy else [])
+            })
+
+        store.get_nsd = mock.MagicMock(return_value=mock_nsd)
+
+        return store
+
+    @asyncio.coroutine
+    def get_published_xpaths(self):
+        """Return the set of xpaths currently registered as publishers."""
+        published_xpaths = set()
+
+        res_iter = yield from self.dts.query_read("D,/rwdts:dts")
+        for i in res_iter:
+            res = (yield from i).result
+            for member in res.member:
+                published_xpaths |= {reg.keyspec for reg in member.state.registration if reg.flags == "publisher"}
+
+        return published_xpaths
+
+    @asyncio.coroutine
+    def register_vnf_publisher(self):
+        yield from self.vnf_handler.register()
+
+    def add_param_to_publisher(self):
+        # Push the generator's next monp through the VNF handler and return it.
+        msg = self.msg_gen.next_message()
+        self.vnf_handler.on_update_mon_params([msg])
+        return msg
+
+    @asyncio.coroutine
+    def register_vnf_test_subscriber(self, on_prepare=None):
+        ready_event = asyncio.Event(loop=self.loop)
+
+        # Register needs to wait till reg-ready is hit, dts does not provide it
+        # out-of-the-box.
+        @asyncio.coroutine
+        def on_ready(*args, **kwargs):
+            ready_event.set()
+
+        self.vnf_test_subscriber = yield from self.dts_sub.register(
+                self.vnf_handler.xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_ready=on_ready, on_prepare=on_prepare
+                    ),
+                flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.CACHE,
+                )
+
+        yield from ready_event.wait()
+
+    def get_ns_mon_param_msgs(self):
+        # NOTE(review): self.ns_handler is not assigned anywhere in this class
+        # (only self.nsr_handler is) — this helper appears unused and would
+        # raise AttributeError if called; confirm before relying on it.
+        return self.ns_handler.get_nsr_mon_param_msgs({'1':['1']})
+
+    @rift.test.dts.async_test
+    def _test_vnf_handler_registration(self):
+        yield from self.vnf_handler.register()
+        published_xpaths = yield from self.get_published_xpaths()
+        assert self.vnf_handler.xpath() in published_xpaths
+
+    @rift.test.dts.async_test
+    def _test_add_vnf_mon_params(self):
+        yield from self.register_vnf_publisher()
+        self.add_param_to_publisher()
+
+        yield from self.register_vnf_test_subscriber()
+        self.add_param_to_publisher()
+
+        # RIFT-12888: Elements do not go immediately into cache after on_prepare.
+        # Because of this, we can't guarantee that the second param will actually be
+        # in the cache yet.
+        elements = list(self.vnf_test_subscriber.elements)
+        assert len(elements) > 0
+        for element in elements:
+            assert element in self.msg_gen.mon_param_msgs
+
+    @rift.test.dts.async_test
+    def _test_nsr_handler_registration(self):
+        yield from self.nsr_handler.register()
+        published_xpaths = yield from self.get_published_xpaths()
+        assert self.nsr_handler.xpath() in published_xpaths
+
+    def _test_publish(self, aggregation_type, expected_value, legacy=False):
+        """Generator helper (delegated to via `yield from` by the test
+        coroutines): publish two VNF monps (values 1 and 2) and assert the
+        NSR-level aggregate equals expected_value."""
+        self.msg_gen = MonParamMsgGenerator(4)
+        store = self.setup_mock_store(aggregation_type=aggregation_type,
+            monps=self.msg_gen.mon_param_msgs,
+            legacy=legacy)
+
+        self.vnf_handler = vnf_mon_params.VnfMonitorDtsHandler(
+                self.log, self.dts, self.loop, 1, "1.1.1.1",
+                self.msg_gen.mon_param_msgs, self.msg_gen.endpoint_msgs
+                )
+
+        self.nsr_handler = nsr_mon_params.NsrMonitorDtsHandler(
+            self.log, self.dts, self.loop, store.nsr[0], [store.get_vnfr()], store)
+
+        # Register/start the NSR monitor, then publish two params.
+        yield from self.nsr_handler.register()
+        yield from self.nsr_handler.start()
+        published_xpaths = yield from self.get_published_xpaths()
+
+        yield from self.register_vnf_publisher()
+        self.add_param_to_publisher()
+        self.add_param_to_publisher()
+
+        nsr_id = store.get_nsr().ns_instance_config_ref
+
+        # Allow the monitor's polling/aggregation cycle to run.
+        yield from asyncio.sleep(5, loop=self.loop)
+
+        itr = yield from self.dts.query_read(self.nsr_handler.xpath(),
+            rwdts.XactFlag.MERGE)
+
+
+        values = []
+        for res in itr:
+            result = yield from res
+            nsr_monp = result.result
+            values.append(nsr_monp.value_integer)
+
+        print (values)
+        assert expected_value in values
+
+    @rift.test.dts.async_test
+    def _test_nsr_monitor_publish_avg(self):
+        yield from self._test_publish("AVERAGE", 1)
+
+    @rift.test.dts.async_test
+    def _test_nsr_monitor_publish_sum(self):
+        yield from self._test_publish("SUM", 3)
+
+
+    @rift.test.dts.async_test
+    def _test_nsr_monitor_publish_max(self):
+        yield from self._test_publish("MAXIMUM", 2)
+
+    @rift.test.dts.async_test
+    def _test_nsr_monitor_publish_min(self):
+        yield from self._test_publish("MINIMUM", 1)
+
+    @rift.test.dts.async_test
+    def test_nsr_monitor_publish_count(self):
+        yield from self._test_publish("COUNT", 2)
+
+    @rift.test.dts.async_test
+    def _test_legacy_nsr_monitor_publish_avg(self):
+        yield from self._test_publish("AVERAGE", 1, legacy=True)
+
+
+
+def main():
+    """Run the DTS mon-param tests under XMLTestRunner.
+
+    Requires RIFT_MODULE_TEST in the environment (XML output directory).
+    -n/--no-runner falls back to unittest's default runner; -v enables
+    DEBUG-level logging in MonParamsDtsTestCase.
+    """
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    MonParamsDtsTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwnsm/CMakeLists.txt b/rwlaunchpad/plugins/rwnsm/CMakeLists.txt
new file mode 100644
index 0000000..1db4a46
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/CMakeLists.txt
@@ -0,0 +1,47 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwnsmtasklet)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+# NOTE(review): PKG_LONG_NAME is expected to be inherited from a parent
+# CMakeLists.txt (rwlaunchpad/CMakeLists.txt sets it) — confirm it is in
+# scope when this directory is added standalone.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+    rift/tasklets/${TASKLET_NAME}/rwnsm_conman.py
+    rift/tasklets/${TASKLET_NAME}/rwnsmplugin.py
+    rift/tasklets/${TASKLET_NAME}/openmano_nsm.py
+    rift/tasklets/${TASKLET_NAME}/cloud.py
+    rift/tasklets/${TASKLET_NAME}/config_value_pool.py
+    rift/tasklets/${TASKLET_NAME}/publisher.py
+    rift/tasklets/${TASKLET_NAME}/xpath.py
+    rift/tasklets/${TASKLET_NAME}/rwvnffgmgr.py
+    rift/tasklets/${TASKLET_NAME}/scale_group.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/rwlaunchpad/plugins/rwnsm/Makefile b/rwlaunchpad/plugins/rwnsm/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+# (the quotes are passed through verbatim to the shell's `find -name` argument)
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+# (if not found, make aborts here with a missing-include error)
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py
new file mode 100644
index 0000000..1a3438c
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py
@@ -0,0 +1 @@
+from .rwnsmtasklet import NsmTasklet
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py
new file mode 100644
index 0000000..5326ca1
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py
@@ -0,0 +1,238 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+from gi.repository import (
+    RwDts as rwdts,
+    RwcalYang as rwcal,
+    RwTypes,
+    ProtobufC,
+    )
+
+import rift.mano.cloud
+import rift.mano.dts as mano_dts
+import rift.tasklets
+
+from . import openmano_nsm
+from . import rwnsmplugin
+
+
+class RwNsPlugin(rwnsmplugin.NsmPluginBase):
+    """
+        RW Implementation of the NsmPluginBase
+    """
+    def __init__(self, dts, log, loop, publisher, ro_account):
+        # publisher and ro_account are accepted to match the common plugin
+        # constructor signature (see ROAccountPluginSelector._create_plugin)
+        # but are not used by this native implementation.
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+    def create_nsr(self, nsr_msg, nsd):
+        """
+        Create Network service record
+        """
+        pass
+
+    @asyncio.coroutine
+    def deploy(self, nsr):
+        # No RO-side deploy step is needed for the native rift plugin.
+        pass
+
+    @asyncio.coroutine
+    def instantiate_ns(self, nsr, config_xact):
+        """
+        Instantiate the passed NSR using the given config transaction
+        """
+        yield from nsr.instantiate(config_xact)
+
+    @asyncio.coroutine
+    def instantiate_vnf(self, nsr, vnfr):
+        """
+        Instantiate the VNFR within the given NSR
+        """
+        yield from vnfr.instantiate(nsr)
+
+    @asyncio.coroutine
+    def instantiate_vl(self, nsr, vlr):
+        """
+        Instantiate the virtual link
+        """
+        yield from vlr.instantiate()
+
+    @asyncio.coroutine
+    def terminate_ns(self, nsr):
+        """
+        Terminate the network service
+        """
+        pass
+
+    @asyncio.coroutine
+    def terminate_vnf(self, vnfr):
+        """
+        Terminate the VNFR
+        """
+        yield from vnfr.terminate()
+
+    @asyncio.coroutine
+    def terminate_vl(self, vlr):
+        """
+        Terminate the virtual link
+        """
+        yield from vlr.terminate()
+
+
+class NsmPlugins(object):
+    """ NSM Plugins """
+    def __init__(self):
+        self._plugin_classes = {
+                "openmano": openmano_nsm.OpenmanoNsPlugin,
+                }
+
+    @property
+    def plugins(self):
+        """ Plugin info """
+        return self._plugin_classes
+
+    def __getitem__(self, name):
+        """ Get item """
+        print("%s", self._plugin_classes)
+        return self._plugin_classes[name]
+
+    def register(self, plugin_name, plugin_class, *args):
+        """ Register a plugin to this Nsm"""
+        self._plugin_classes[plugin_name] = plugin_class
+
+    def deregister(self, plugin_name, plugin_class, *args):
+        """ Deregister a plugin to this Nsm"""
+        if plugin_name in self._plugin_classes:
+            del self._plugin_classes[plugin_name]
+
+    def class_by_plugin_name(self, name):
+        """ Get class by plugin name """
+        return self._plugin_classes[name]
+
+
+class ROAccountConfigSubscriber(mano_dts.AbstractConfigSubscriber):
+    """Subscribes to resource-orchestrator account configuration changes;
+    registration/callback machinery comes from AbstractConfigSubscriber."""
+
+    def key_name(self):
+        # Config entries are keyed by their "name" leaf.
+        return "name"
+
+    def get_xpath(self):
+        # Config-store (C,) xpath of the RO account container.
+        return("C,/rw-launchpad:resource-orchestrator")
+
+
+class CloudAccountConfigSubscriber:
+    def __init__(self, log, dts, log_hdl):
+        self._dts = dts
+        self._log = log
+        self._log_hdl = log_hdl
+
+        self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber(
+                self._dts,
+                self._log,
+                self._log_hdl,
+                rift.mano.cloud.CloudAccountConfigCallbacks())
+
+    def get_cloud_account_sdn_name(self, account_name):
+        if account_name in self._cloud_sub.accounts:
+            self._log.debug("Cloud accnt msg is %s",self._cloud_sub.accounts[account_name].account_msg)
+            if self._cloud_sub.accounts[account_name].account_msg.has_field("sdn_account"):
+                sdn_account = self._cloud_sub.accounts[account_name].account_msg.sdn_account 
+                self._log.info("SDN associated with Cloud name %s is %s", account_name, sdn_account)
+                return sdn_account
+            else:
+                self._log.debug("No SDN Account associated with Cloud name %s", account_name)
+                return None
+
+    @asyncio.coroutine
+    def register(self):
+       self._cloud_sub.register()
+
+
+class ROAccountPluginSelector(object):
+    """
+    Select the RO based on the config.
+
+    If no RO account is specified, then default to rift-ro.
+
+    Note:
+    Currently only one RO can be used (one-time global config.)
+    """
+    DEFAULT_PLUGIN = RwNsPlugin
+
+    def __init__(self, dts, log, loop, records_publisher):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._records_publisher = records_publisher
+
+        self._nsm_plugins = NsmPlugins()
+
+        # NOTE(review): ROAccountConfigSubscriber (above) defines no __init__
+        # of its own; the (log, dts, loop, callback=...) arguments must match
+        # mano_dts.AbstractConfigSubscriber's constructor — confirm.
+        self._ro_sub = ROAccountConfigSubscriber(
+                self._log,
+                self._dts,
+                self._loop,
+                callback=self.on_ro_account_change
+                )
+
+        # The default plugin will be RwNsPlugin
+        self._plugin_instances = {}
+        self._ro_plugin = self._create_plugin(self.DEFAULT_PLUGIN, None)
+
+    @property
+    def ro_plugin(self):
+        return self._ro_plugin
+
+    def on_ro_account_change(self, ro_account, action):
+        # Only CREATE/DELETE are handled; other query actions are ignored.
+        if action == rwdts.QueryAction.CREATE:
+            self._on_ro_account_added(ro_account)
+        elif action == rwdts.QueryAction.DELETE:
+            self._on_ro_account_deleted(ro_account)
+
+    def _on_ro_account_added(self, ro_account):
+        self._log.debug("Got nsm plugin RO account: %s", ro_account)
+        try:
+            nsm_cls = self._nsm_plugins.class_by_plugin_name(
+                    ro_account.account_type
+                    )
+        except KeyError as e:
+            # Unknown account type: fall back to the native rift plugin.
+            self._log.debug(
+                "RO account nsm plugin not found: %s.  Using standard rift nsm.",
+                ro_account.name
+                )
+            nsm_cls = self.DEFAULT_PLUGIN
+
+        self._ro_plugin = self._create_plugin(nsm_cls, ro_account)
+
+    def _on_ro_account_deleted(self, ro_account):
+        # No RO configured: ro_plugin is None until a new account arrives.
+        self._ro_plugin = None
+
+    def _create_plugin(self, nsm_cls, ro_account):
+        # Check to see if the plugin was already instantiated
+        if nsm_cls in self._plugin_instances:
+            self._log.debug("RO account nsm plugin already instantiated.  Using existing.")
+            return self._plugin_instances[nsm_cls]
+
+        # Otherwise, instantiate a new plugin using the cloud account
+        self._log.debug("Instantiating new RO account using class: %s", nsm_cls)
+        nsm_instance = nsm_cls(self._dts, self._log, self._loop,
+                               self._records_publisher, ro_account)
+
+        self._plugin_instances[nsm_cls] = nsm_instance
+        return nsm_instance
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self._ro_sub.register()
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py
new file mode 100644
index 0000000..9e35e2f
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py
@@ -0,0 +1,154 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import os
+import pickle
+import uuid
+
+
+class ParameterValueError(Exception):
+    """Raised when a pool value request or record is invalid (pool exhausted,
+    value already used, or value out of allocation order)."""
+    pass
+
+
+class ParameterValuePool(object):
+    def __init__(self, log, name, value_iter):
+        self._log = log
+        self._name = name
+
+        self._used_pool_values = []
+        self._available_pool_values = list(value_iter)
+
+        self._backing_filepath = os.path.join(
+                os.environ["RIFT_ARTIFACTS"],
+                "parameter_pools",
+                self._name
+                )
+
+        self._read_used_pool_values()
+
+    def _save_used_pool_values(self):
+        dir_path = os.path.dirname(self._backing_filepath)
+        if not os.path.exists(dir_path):
+            try:
+                os.makedirs(dir_path, exist_ok=True)
+            except OSError as e:
+                self._log.error("Could not create directory for save used pool: %s", str(e))
+
+        try:
+            with open(self._backing_filepath, "wb") as hdl:
+                pickle.dump(self._used_pool_values, hdl)
+        except OSError as e:
+            self._log.error(
+                    "Could not open the parameter value pool file: %s",
+                    str(e))
+        except pickle.PickleError as e:
+            self._log.error(
+                    "Could not pickle the used parameter value pool: %s",
+                    str(e))
+
+    def _read_used_pool_values(self):
+        try:
+            with open(self._backing_filepath, 'rb') as hdl:
+                self._used_pool_values = pickle.load(hdl)
+
+        except (OSError, EOFError):
+            self._log.warning("Could not read from backing file: %s",
+                              self._backing_filepath)
+            self._used_pool_values = []
+
+        except pickle.PickleError as e:
+            self._log.warning("Could not unpickle the used parameter value pool from %s: %s",
+                              self._backing_filepath, str(e))
+            self._used_pool_values = []
+
+        for value in self._used_pool_values:
+            self._available_pool_values.remove(value)
+
+    def get_next_unused_value(self):
+        if len(self._available_pool_values) == 0:
+            raise ParameterValueError("Not more parameter values to to allocate")
+
+        next_value = self._available_pool_values[0]
+        self._log.debug("Got next value for parameter pool %s: %s", self._name, next_value)
+
+        return next_value
+
+    def add_used_value(self, value):
+        value = int(value)
+
+        if len(self._available_pool_values) == 0:
+            raise ParameterValueError("Not more parameter values to to allocate")
+
+        if value in self._used_pool_values:
+            raise ParameterValueError(
+                    "Primitive value of {} was already used for pool name: {}".format(
+                        value,
+                        self._name,
+                        )
+                    )
+
+        if value != self._available_pool_values[0]:
+            raise ParameterValueError("Parameter value not the next in the available list: %s", value)
+
+        self._available_pool_values.pop(0)
+        self._used_pool_values.append(value)
+        self._save_used_pool_values()
+
+    def remove_used_value(self, value):
+        if value not in self._used_pool_values:
+            self._log.warning("Primitive value of %s was never allocated for pool name: %s",
+                    value, self._name
+                    )
+            return
+
+        self._used_pool_values.remove(value)
+        self._available_pool_values.insert(0, value)
+        self._save_used_pool_values()
+
+
+if __name__ == "__main__":
+    # Self-test: exercises allocation order, persistence across
+    # re-instantiation, and rejection of out-of-order values.  A random pool
+    # name ensures the backing file starts empty.
+    import logging
+    logging.basicConfig(level=logging.DEBUG)
+    logger = logging.getLogger("config_value_pool.py")
+    name = str(uuid.uuid4())
+    param_pool = ParameterValuePool(logger, name, range(1000, 2000))
+
+    a = param_pool.get_next_unused_value()
+    assert a == 1000
+
+    param_pool.add_used_value(a)
+
+    a = param_pool.get_next_unused_value()
+    assert a == 1001
+    param_pool.add_used_value(a)
+
+    # Re-create the pool: used values 1000-1001 are reloaded from disk.
+    param_pool = ParameterValuePool(logger, name, range(1000, 2000))
+    a = param_pool.get_next_unused_value()
+    assert a == 1002
+
+    # Skipping ahead (1004 while 1002 is next) must be rejected.
+    try:
+        param_pool.add_used_value(1004)
+    except ParameterValueError:
+        pass
+    else:
+        assert False
+
+    a = param_pool.get_next_unused_value()
+    assert a == 1002
+    param_pool.add_used_value(1002)
+
+    # NOTE(review): this range excludes 1000-1002, which the backing file now
+    # records as used; _read_used_pool_values() calls list.remove() on each
+    # and would raise ValueError here — confirm the intended behavior.
+    param_pool = ParameterValuePool(logger, name, range(1005, 2000))
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py
new file mode 100644
index 0000000..c942003
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py
@@ -0,0 +1,709 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import os
+import sys
+import time
+import yaml
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwVnfrYang,
+)
+
+import rift.openmano.rift2openmano as rift2openmano
+import rift.openmano.openmano_client as openmano_client
+from . import rwnsmplugin
+
+import rift.tasklets
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+# Directory under $RIFT_ARTIFACTS where generated openmano descriptors are
+# dumped for debugging.  Read at import time; requires RIFT_ARTIFACTS to be set.
+DUMP_OPENMANO_DIR = os.path.join(
+        os.environ["RIFT_ARTIFACTS"],
+        "openmano_descriptors"
+        )
+
+
+def dump_openmano_descriptor(name, descriptor_str):
+    """ Write a descriptor string to a timestamped YAML file for debugging.
+
+    Arguments:
+        name           - logical descriptor name, embedded in the file name
+        descriptor_str - full descriptor content to write
+
+    Returns:
+        The path of the dump file.  NOTE(review): the path is returned even
+        when the write failed (the OSError is only printed) — callers should
+        not assume the file exists.
+    """
+    filename = "{}_{}.yaml".format(
+        time.strftime("%Y%m%d-%H%M%S"),
+        name
+        )
+
+    filepath = os.path.join(
+            DUMP_OPENMANO_DIR,
+            filename
+            )
+
+    try:
+        # Create the dump directory lazily on first use.
+        if not os.path.exists(DUMP_OPENMANO_DIR):
+            os.makedirs(DUMP_OPENMANO_DIR)
+
+        with open(filepath, 'w') as hdl:
+            hdl.write(descriptor_str)
+
+    except OSError as e:
+        # Best-effort debug aid: failure to dump is not fatal.
+        print("Failed to dump openmano descriptor: %s" % str(e))
+
+    return filepath
+
+class VnfrConsoleOperdataDtsHandler(object):
+    """ registers 'D,/vnfr:vnfr-console/vnfr:vnfr[id]/vdur[id]' and handles CRUD from DTS
+
+    Publishes the console URL of a single VDU.  On a READ query it fetches the
+    VM console URL from openmano (via the owning NSR's HTTP API) and responds
+    with a VnfrConsole VDU record.  Only READ is supported.
+    """
+    @property
+    def vnfr_vdu_console_xpath(self):
+        """ path for resource-mgr"""
+        return ("D,/rw-vnfr:vnfr-console/rw-vnfr:vnfr[rw-vnfr:id='{}']/rw-vnfr:vdur[vnfr:id='{}']".format(self._vnfr_id,self._vdur_id))
+
+    def __init__(self, dts, log, loop, nsr, vnfr_id, vdur_id, vdu_id):
+        # dts/log/loop are the tasklet's DTS handle, logger and event loop.
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._regh = None
+        # Owning OpenmanoNsr; used for its _http_api and _nsr_uuid.
+        self._nsr = nsr
+
+        self._vnfr_id = vnfr_id
+        self._vdur_id = vdur_id
+        self._vdu_id = vdu_id
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for VNFR VDU Operational Data read from dts """
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            xpath = ks_path.to_xpath(RwVnfrYang.get_schema())
+            self._log.debug(
+                "Got VNFR VDU Opdata xact_info: %s, action: %s): %s:%s",
+                xact_info, action, xpath, msg
+                )
+
+            if action == rwdts.QueryAction.READ:
+                schema = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur.schema()
+                path_entry = schema.keyspec_to_entry(ks_path)
+                self._log.debug("VDU Opdata path is {}".format(path_entry))
+
+                try:
+                    # The HTTP call is blocking; run it in an executor so the
+                    # event loop is not stalled.
+                    console_url = yield from self._loop.run_in_executor(
+                            None,
+                            self._nsr._http_api.get_instance_vm_console_url,
+                            self._nsr._nsr_uuid,
+                            self._vdur_id
+                            )
+
+                    self._log.debug("Got console response: %s for NSR ID %s vdur ID %s",
+                                        console_url,
+                                        self._nsr._nsr_uuid,
+                                        self._vdur_id
+                                       )
+                    vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+                    vdur_console.id = self._vdur_id
+                    if console_url:
+                        vdur_console.console_url = console_url
+                    else:
+                        # No URL available: respond with the literal 'none'.
+                        vdur_console.console_url = 'none'
+                    self._log.debug("Recevied console URL for vdu {} is {}".format(self._vdu_id,vdur_console))
+                except openmano_client.InstanceStatusError as e:
+                    # Status errors are non-fatal: still ACK with 'none'.
+                    self._log.error("Could not get NS instance console URL: %s",
+                                        str(e))
+                    vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+                    vdur_console.id = self._vdur_id
+                    vdur_console.console_url = 'none'
+                      
+                xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK,
+                                            xpath=self.vnfr_vdu_console_xpath,
+                                            msg=vdur_console)
+            else:
+                #raise VnfRecordError("Not supported operation %s" % action)
+                # Non-READ operations are ACKed without a message body.
+                self._log.error("Not supported operation %s" % action)
+                xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK)
+                return 
+
+        self._log.debug("Registering for VNFR VDU using xpath: %s",
+                        self.vnfr_vdu_console_xpath)
+        hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=self.vnfr_vdu_console_xpath,
+                                        handler=hdl,
+                                        flags=rwdts.Flag.PUBLISHER,
+                                        )
+
+
+
+class OpenmanoVnfr(object):
+    """ Wraps a RIFT VNFR and manages its corresponding openmano VNF descriptor.
+
+    Converts the RIFT VNFD to openmano format (via rift2openmano) and
+    creates/deletes the VNF descriptor through the openmano CLI API.
+    """
+    def __init__(self, log, loop, cli_api, vnfr):
+        self._log = log
+        self._loop = loop
+        self._cli_api = cli_api
+        self._vnfr = vnfr
+        self._vnfd_id = vnfr.vnfd.id
+
+        # openmano-assigned VNF descriptor uuid, set by create().
+        self._vnf_id = None
+
+        # True only when create() actually created the descriptor (not when
+        # it rebound to an existing one); delete() is a no-op otherwise.
+        self._created = False
+
+    @property
+    def vnfd(self):
+        # RIFT-side VNFD wrapper used as input to the openmano conversion.
+        return rift2openmano.RiftVNFD(self._vnfr.vnfd)
+
+    @property
+    def vnfr(self):
+        return self._vnfr
+
+    @property
+    def rift_vnfd_id(self):
+        return self._vnfd_id
+
+    @property
+    def openmano_vnfd_id(self):
+        return self._vnf_id
+
+    @property
+    def openmano_vnfd(self):
+        self._log.debug("Converting vnfd %s from rift to openmano", self.vnfd.id)
+        openmano_vnfd = rift2openmano.rift2openmano_vnfd(self.vnfd)
+        return openmano_vnfd
+
+    @property
+    def openmano_vnfd_yaml(self):
+        return yaml.safe_dump(self.openmano_vnfd, default_flow_style=False)
+
+    @asyncio.coroutine
+    def create(self):
+        """ Create the VNF descriptor in openmano (idempotent by name). """
+        self._log.debug("Creating openmano vnfd")
+        openmano_vnfd = self.openmano_vnfd
+        name = openmano_vnfd["vnf"]["name"]
+
+        # If the name already exists, get the openmano vnfd id
+        name_uuid_map = yield from self._loop.run_in_executor(
+                    None,
+                    self._cli_api.vnf_list,
+                    )
+
+        if name in name_uuid_map:
+            # Rebind to the existing descriptor; _created stays False so we
+            # won't delete a descriptor we didn't create.
+            vnf_id = name_uuid_map[name]
+            self._log.debug("Vnf already created.  Got existing openmano vnfd id: %s", vnf_id)
+            self._vnf_id = vnf_id
+            return
+
+        self._vnf_id, _ = yield from self._loop.run_in_executor(
+                None,
+                self._cli_api.vnf_create,
+                self.openmano_vnfd_yaml,
+                )
+
+        # Dump the generated descriptor for offline debugging.
+        fpath = dump_openmano_descriptor(
+           "{}_vnf".format(name),
+           self.openmano_vnfd_yaml
+           )
+
+        self._log.debug("Dumped Openmano VNF descriptor to: %s", fpath)
+
+        self._created = True
+
+    @asyncio.coroutine
+    def delete(self):
+        """ Delete the VNF descriptor from openmano if this object created it. """
+        if not self._created:
+            return
+
+        self._log.debug("Deleting openmano vnfd")
+        if self._vnf_id is None:
+            self._log.warning("Openmano vnf id not set.  Cannot delete.")
+            return
+
+        yield from self._loop.run_in_executor(
+                None,
+                self._cli_api.vnf_delete,
+                self._vnf_id,
+                )
+
+
+class OpenmanoNsr(object):
+    """ A network service record backed by an openmano scenario/instance.
+
+    Lifecycle: add_vnfr() per constituent VNF, create() the scenario,
+    deploy() the instance (which starts a monitoring task that publishes
+    VNFR state), then terminate() and delete().
+    """
+    # Seconds the monitor task waits for all VNFs to become active before
+    # marking the NSR failed.
+    TIMEOUT_SECS = 120
+
+    def __init__(self, dts, log, loop, publisher, cli_api, http_api, nsd_msg, nsr_config_msg):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        # Publisher used to publish/unpublish VNFR messages (see monitor task).
+        self._publisher = publisher
+        self._cli_api = cli_api
+        self._http_api = http_api
+
+        self._nsd_msg = nsd_msg
+        self._nsr_config_msg = nsr_config_msg
+
+        self._vnfrs = []
+        # vm uuid -> VnfrConsoleOperdataDtsHandler, created lazily by the
+        # monitor task and deregistered in terminate().
+        self._vdur_console_handler = {}
+
+        # openmano scenario (nsd) uuid, set by create().
+        self._nsd_uuid = None
+        # openmano instance (nsr) uuid, set by deploy().
+        self._nsr_uuid = None
+
+        self._created = False
+
+        self._monitor_task = None
+
+    @property
+    def nsd(self):
+        return rift2openmano.RiftNSD(self._nsd_msg)
+
+    @property
+    def vnfds(self):
+        # Map of RIFT vnfd id -> RiftVNFD for the openmano NSD conversion.
+        return {v.rift_vnfd_id: v.vnfd for v in self._vnfrs}
+
+    @property
+    def vnfrs(self):
+        return self._vnfrs
+
+    @property
+    def openmano_nsd_yaml(self):
+        self._log.debug("Converting nsd %s from rift to openmano", self.nsd.id)
+        openmano_nsd = rift2openmano.rift2openmano_nsd(self.nsd, self.vnfds)
+        return yaml.safe_dump(openmano_nsd, default_flow_style=False)
+
+
+    @property
+    def openmano_instance_create_yaml(self):
+        """ YAML input for openmano instance-scenario-create, built from the
+        NSR config message and the NSD's VLDs (netmap-use entries). """
+        self._log.debug("Creating instance-scenario-create input file for nsd %s with name %s", self.nsd.id, self._nsr_config_msg.name)
+        openmano_instance_create = {}
+        openmano_instance_create["name"] = self._nsr_config_msg.name
+        openmano_instance_create["description"] = self._nsr_config_msg.description
+        openmano_instance_create["scenario"] = self._nsd_uuid
+        if self._nsr_config_msg.has_field("om_datacenter"):
+            openmano_instance_create["datacenter"] = self._nsr_config_msg.om_datacenter
+        openmano_instance_create["networks"] = {}
+        # Only VLDs with an explicit VIM network name are mapped; others are
+        # left for openmano to create.
+        for vld_msg in self._nsd_msg.vld:
+            if vld_msg.vim_network_name:
+                network = {}
+                network["name"] = vld_msg.name
+                network["netmap-use"] = vld_msg.vim_network_name
+                #network["datacenter"] = vld_msg.om_datacenter
+                openmano_instance_create["networks"][vld_msg.name] = network 
+             
+        return yaml.safe_dump(openmano_instance_create, default_flow_style=False)
+
+
+    @asyncio.coroutine
+    def add_vnfr(self, vnfr):
+        """ Wrap a RIFT VNFR in an OpenmanoVnfr, create its openmano
+        descriptor, and track it as part of this NSR. """
+        vnfr = OpenmanoVnfr(self._log, self._loop, self._cli_api, vnfr)
+        yield from vnfr.create()
+        self._vnfrs.append(vnfr)
+
+    @asyncio.coroutine
+    def delete(self):
+        """ Delete the openmano scenario and all constituent VNF descriptors. """
+        if not self._created:
+            self._log.debug("NSD wasn't created.  Skipping delete.")
+            return
+
+        self._log.debug("Deleting openmano nsr")
+
+        yield from self._loop.run_in_executor(
+               None,
+               self._cli_api.ns_delete,
+               self._nsd_uuid,
+               )
+
+        self._log.debug("Deleting openmano vnfrs")
+        for vnfr in self._vnfrs:
+            yield from vnfr.delete()
+
+    @asyncio.coroutine
+    def create(self):
+        """ Create the openmano scenario (idempotent by NSD name). """
+        self._log.debug("Creating openmano scenario")
+        name_uuid_map = yield from self._loop.run_in_executor(
+                None,
+                self._cli_api.ns_list,
+                )
+
+        if self._nsd_msg.name in name_uuid_map:
+            # Rebind to the existing scenario; _created stays False so
+            # delete() will not remove a scenario we didn't create.
+            self._log.debug("Found existing openmano scenario")
+            self._nsd_uuid = name_uuid_map[self._nsd_msg.name]
+            return
+
+
+        # Use the nsd uuid as the scenario name to rebind to existing
+        # scenario on reload or to support multiple instances of the same
+        # nsd
+        self._nsd_uuid, _ = yield from self._loop.run_in_executor(
+                None,
+                self._cli_api.ns_create,
+                self.openmano_nsd_yaml,
+                self._nsd_msg.name
+                )
+        fpath = dump_openmano_descriptor(
+           "{}_nsd".format(self._nsd_msg.name),
+           self.openmano_nsd_yaml,
+           )
+
+        self._log.debug("Dumped Openmano NS descriptor to: %s", fpath)
+
+        self._created = True
+
+    @asyncio.coroutine
+    def instance_monitor_task(self):
+        """ Poll openmano (1s interval) until every VNF is active or failed.
+
+        Publishes a VNFR message ("running" with mgmt IP, VDUs and connection
+        points, or "failed") for each constituent VNF, and registers a console
+        operdata handler per VM.  Returns when all VNFs are active, on error,
+        or on timeout (TIMEOUT_SECS).
+        """
+        self._log.debug("Starting Instance monitoring task")
+
+        start_time = time.time()
+        active_vnfs = []
+
+        while True:
+            yield from asyncio.sleep(1, loop=self._loop)
+
+            try:
+                instance_resp_json = yield from self._loop.run_in_executor(
+                        None,
+                        self._http_api.get_instance,
+                        self._nsr_uuid,
+                        )
+
+                self._log.debug("Got instance response: %s for NSR ID %s",
+                        instance_resp_json,
+                        self._nsr_uuid)
+
+            except openmano_client.InstanceStatusError as e:
+                # Transient status failure: retry on the next poll.
+                self._log.error("Could not get NS instance status: %s", str(e))
+                continue
+
+            def all_vms_active(vnf):
+                # True when every VM in the VNF reports status ACTIVE.
+                for vm in vnf["vms"]:
+                    vm_status = vm["status"]
+                    vm_uuid = vm["uuid"]
+                    if vm_status != "ACTIVE":
+                        self._log.debug("VM is not yet active: %s (status: %s)", vm_uuid, vm_status)
+                        return False
+
+                return True
+
+            def any_vm_active_nomgmtip(vnf):
+                # NOTE(review): body is identical to all_vms_active and the
+                # function appears unused here — confirm before removing.
+                for vm in vnf["vms"]:
+                    vm_status = vm["status"]
+                    vm_uuid = vm["uuid"]
+                    if vm_status != "ACTIVE":
+                        self._log.debug("VM is not yet active: %s (status: %s)", vm_uuid, vm_status)
+                        return False
+
+                return True
+
+            def any_vms_error(vnf):
+                # True when any VM in the VNF reports status ERROR.
+                for vm in vnf["vms"]:
+                    vm_status = vm["status"]
+                    vm_vim_info = vm["vim_info"]
+                    vm_uuid = vm["uuid"]
+                    if vm_status == "ERROR":
+                        self._log.error("VM Error: %s (vim_info: %s)", vm_uuid, vm_vim_info)
+                        return True
+
+                return False
+
+            def get_vnf_ip_address(vnf):
+                # Management IP as reported by openmano, or None if absent.
+                if "ip_address" in vnf:
+                    return vnf["ip_address"].strip()
+                return None
+
+            def get_ext_cp_info(vnf):
+                # Collect (external_name, ip_address) for every externally
+                # named interface; missing IPs become "0.0.0.0".
+                cp_info_list = []
+                for vm in vnf["vms"]:
+                    if "interfaces" not in vm:
+                        continue
+
+                    for intf in vm["interfaces"]:
+                        if "external_name" not in intf:
+                            continue
+
+                        if not intf["external_name"]:
+                            continue
+
+                        ip_address = intf["ip_address"]
+                        if ip_address is None:
+                            ip_address = "0.0.0.0"
+
+                        cp_info_list.append((intf["external_name"], ip_address))
+
+                return cp_info_list
+
+            def get_vnf_status(vnfr):
+                # When we create an openmano descriptor we use <name>__<idx>
+                # to come up with openmano constituent VNF name.  Use this
+                # knowledge to map the vnfr back.
+                openmano_vnfr_suffix = "__{}".format(
+                        vnfr.vnfr.vnfr_msg.member_vnf_index_ref
+                        )
+
+                for vnf in instance_resp_json["vnfs"]:
+                    if vnf["vnf_name"].endswith(openmano_vnfr_suffix):
+                        return vnf
+
+                self._log.warning("Could not find vnf status with name that ends with: %s",
+                                  openmano_vnfr_suffix)
+                return None
+
+            for vnfr in self._vnfrs:
+                if vnfr in active_vnfs:
+                    # Skipping, so we don't re-publish the same VNF message.
+                    continue
+
+                vnfr_msg = vnfr.vnfr.vnfr_msg.deep_copy()
+                vnfr_msg.operational_status = "init"
+
+                try:
+                    vnf_status = get_vnf_status(vnfr)
+                    self._log.debug("Found VNF status: %s", vnf_status)
+                    if vnf_status is None:
+                        self._log.error("Could not find VNF status from openmano")
+                        vnfr_msg.operational_status = "failed"
+                        yield from self._publisher.publish_vnfr(None, vnfr_msg)
+                        return
+
+                    # If there was a VNF that has a errored VM, then just fail the VNF and stop monitoring.
+                    if any_vms_error(vnf_status):
+                        self._log.debug("VM was found to be in error state.  Marking as failed.")
+                        vnfr_msg.operational_status = "failed"
+                        yield from self._publisher.publish_vnfr(None, vnfr_msg)
+                        return
+
+                    if all_vms_active(vnf_status):
+                        vnf_ip_address = get_vnf_ip_address(vnf_status)
+
+                        if vnf_ip_address is None:
+                            # NOTE(review): this continue also skips the
+                            # timeout check below — a VNF that never reports
+                            # an IP keeps the task polling forever.  Confirm
+                            # intended.
+                            self._log.warning("No IP address obtained "
+                                    "for VNF: {}, will retry.".format(
+                                        vnf_status['vnf_name']))
+                            continue
+
+                        self._log.debug("All VMs in VNF are active.  Marking as running.")
+                        vnfr_msg.operational_status = "running"
+
+                        self._log.debug("Got VNF ip address: %s", vnf_ip_address)
+                        vnfr_msg.mgmt_interface.ip_address = vnf_ip_address
+                        vnfr_msg.vnf_configuration.config_access.mgmt_ip_address = vnf_ip_address
+
+
+                        # Register a console operdata handler once per VM and
+                        # record each VM as a VDU in the VNFR message.
+                        for vm in vnf_status["vms"]:
+                            if vm["uuid"] not in self._vdur_console_handler:
+                                vdur_console_handler = VnfrConsoleOperdataDtsHandler(self._dts, self._log, self._loop, 
+                                                    self, vnfr_msg.id,vm["uuid"],vm["name"])
+                                yield from vdur_console_handler.register()
+                                self._vdur_console_handler[vm["uuid"]] = vdur_console_handler
+                             
+                            vdur_msg = vnfr_msg.vdur.add()
+                            vdur_msg.vim_id = vm["vim_vm_id"]
+                            vdur_msg.id = vm["uuid"]
+
+                        # Add connection point information for the config manager
+                        cp_info_list = get_ext_cp_info(vnf_status)
+                        for (cp_name, cp_ip) in cp_info_list:
+                            cp = vnfr_msg.connection_point.add()
+                            cp.name = cp_name
+                            cp.short_name = cp_name
+                            cp.ip_address = cp_ip
+
+                        yield from self._publisher.publish_vnfr(None, vnfr_msg)
+                        active_vnfs.append(vnfr)
+
+                    if (time.time() - start_time) > OpenmanoNsr.TIMEOUT_SECS:
+                        self._log.error("NSR timed out before reaching running state")
+                        vnfr_msg.operational_status = "failed"
+                        yield from self._publisher.publish_vnfr(None, vnfr_msg)
+                        return
+
+                except Exception as e:
+                    # Any unexpected error fails the VNFR and ends monitoring.
+                    vnfr_msg.operational_status = "failed"
+                    yield from self._publisher.publish_vnfr(None, vnfr_msg)
+                    self._log.exception("Caught exception publishing vnfr info: %s", str(e))
+                    return
+
+            if len(active_vnfs) == len(self._vnfrs):
+                self._log.info("All VNF's are active.  Exiting NSR monitoring task")
+                return
+
+    @asyncio.coroutine
+    def deploy(self):
+        """ Instantiate the scenario (idempotent by NSR name) and start the
+        monitoring task.  Requires create() to have run first. """
+        if self._nsd_uuid is None:
+            raise ValueError("Cannot deploy an uncreated nsd")
+
+        self._log.debug("Deploying openmano scenario")
+
+        name_uuid_map = yield from self._loop.run_in_executor(
+                None,
+                self._cli_api.ns_instance_list,
+                )
+
+        if self._nsr_config_msg.name in name_uuid_map:
+            # Rebind to an already-running instance of the same name.
+            self._log.debug("Found existing instance with nsr name: %s", self._nsr_config_msg.name)
+            self._nsr_uuid = name_uuid_map[self._nsr_config_msg.name]
+        else:
+            self._nsr_uuid = yield from self._loop.run_in_executor(
+                    None,
+                    self._cli_api.ns_instance_scenario_create,
+                    self.openmano_instance_create_yaml)
+
+            fpath = dump_openmano_descriptor(
+               "{}_instance_sce_create".format(self._nsr_config_msg.name),
+               self.openmano_instance_create_yaml,
+               )
+
+            self._log.debug("Dumped Openmano NS Scenario Cretae to: %s", fpath)
+
+
+        self._monitor_task = asyncio.ensure_future(
+                self.instance_monitor_task(), loop=self._loop
+                )
+
+    @asyncio.coroutine
+    def terminate(self):
+        """ Stop monitoring, deregister console handlers, and terminate the
+        openmano instance (if one was instantiated). """
+
+        for _,handler in  self._vdur_console_handler.items():
+            handler._regh.deregister()
+    
+        if self._nsr_uuid is None:
+            self._log.warning("Cannot terminate an un-instantiated nsr")
+            return
+
+        if self._monitor_task is not None:
+            self._monitor_task.cancel()
+            self._monitor_task = None
+
+        self._log.debug("Terminating openmano nsr")
+        yield from self._loop.run_in_executor(
+               None,
+               self._cli_api.ns_terminate,
+               self._nsr_uuid,
+               )
+
+
+class OpenmanoNsPlugin(rwnsmplugin.NsmPluginBase):
+    """
+        RW Implementation of the NsmPluginBase
+
+        Maps NSM plugin operations onto OpenmanoNsr objects, keyed by NSR
+        config id in self._openmano_nsrs.
+    """
+    def __init__(self, dts, log, loop, publisher, ro_account):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._publisher = publisher
+
+        self._cli_api = None
+        self._http_api = None
+        # nsr config id -> OpenmanoNsr
+        self._openmano_nsrs = {}
+
+        self._set_ro_account(ro_account)
+
+    def _set_ro_account(self, ro_account):
+        """ Build the openmano CLI and HTTP API clients from the RO account.
+
+        NOTE(review): this reads self.log but __init__ assigns self._log —
+        works only if NsmPluginBase exposes a 'log' property; confirm.
+        """
+        self._log.debug("Setting openmano plugin cloud account: %s", ro_account)
+        self._cli_api = openmano_client.OpenmanoCliAPI(
+                self.log,
+                ro_account.openmano.host,
+                ro_account.openmano.port,
+                ro_account.openmano.tenant_id,
+                )
+
+        self._http_api = openmano_client.OpenmanoHttpAPI(
+                self.log,
+                ro_account.openmano.host,
+                ro_account.openmano.port,
+                ro_account.openmano.tenant_id,
+                )
+
+    def create_nsr(self, nsr_config_msg, nsd_msg):
+        """
+        Create Network service record
+        """
+        openmano_nsr = OpenmanoNsr(
+                self._dts,
+                self._log,
+                self._loop,
+                self._publisher,
+                self._cli_api,
+                self._http_api,
+                nsd_msg,
+                nsr_config_msg
+                )
+        self._openmano_nsrs[nsr_config_msg.id] = openmano_nsr
+
+    @asyncio.coroutine
+    def deploy(self, nsr_msg):
+        # Create the scenario then instantiate it for the referenced NSR.
+        openmano_nsr = self._openmano_nsrs[nsr_msg.ns_instance_config_ref]
+        yield from openmano_nsr.create()
+        yield from openmano_nsr.deploy()
+
+    @asyncio.coroutine
+    def instantiate_ns(self, nsr, xact):
+        """
+        Instantiate NSR with the passed nsr id
+        """
+        yield from nsr.instantiate(xact)
+
+    @asyncio.coroutine
+    def instantiate_vnf(self, nsr, vnfr):
+        """
+        Instantiate the VNF (create its openmano descriptor) and publish
+        the VNFR in "init" state.
+        """
+        openmano_nsr = self._openmano_nsrs[nsr.id]
+        yield from openmano_nsr.add_vnfr(vnfr)
+
+        # Mark the VNFR as running
+        # TODO: Create a task to monitor nsr/vnfr status
+        vnfr_msg = vnfr.vnfr_msg.deep_copy()
+        vnfr_msg.operational_status = "init"
+
+        self._log.debug("Attempting to publish openmano vnf: %s", vnfr_msg)
+        with self._dts.transaction() as xact:
+            yield from self._publisher.publish_vnfr(xact, vnfr_msg)
+
+    @asyncio.coroutine
+    def instantiate_vl(self, nsr, vlr):
+        """
+        Instantiate NSR with the passed nsr id
+
+        No-op: virtual links are handled by openmano as part of the scenario.
+        """
+        pass
+
+    @asyncio.coroutine
+    def terminate_ns(self, nsr):
+        """
+        Terminate the network service
+
+        Terminates and deletes the openmano instance/scenario, unpublishes
+        all constituent VNFRs, and drops the NSR from the plugin's map.
+        """
+        nsr_id = nsr.id
+        openmano_nsr = self._openmano_nsrs[nsr_id]
+        yield from openmano_nsr.terminate()
+        yield from openmano_nsr.delete()
+
+        with self._dts.transaction() as xact:
+            for vnfr in openmano_nsr.vnfrs:
+                self._log.debug("Unpublishing VNFR: %s", vnfr.vnfr.vnfr_msg)
+                yield from self._publisher.unpublish_vnfr(xact, vnfr.vnfr.vnfr_msg)
+
+        del self._openmano_nsrs[nsr_id]
+
+    @asyncio.coroutine
+    def terminate_vnf(self, vnfr):
+        """
+        Terminate the network service
+
+        No-op: VNF teardown happens in terminate_ns via OpenmanoNsr.delete().
+        """
+        pass
+
+    @asyncio.coroutine
+    def terminate_vl(self, vlr):
+        """
+        Terminate the virtual link
+
+        No-op: virtual links are handled by openmano as part of the scenario.
+        """
+        pass
+
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py
new file mode 100644
index 0000000..6c4b123
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py
@@ -0,0 +1,289 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import concurrent.futures
+import json
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwTypes,
+    RwVnfdYang,
+    RwYang
+    )
+import rift.tasklets
+
+import requests
+
+
+class NsrOpDataDtsHandler(object):
+    """ The network service op data DTS handler
+
+    Registers as a publisher for NS instance opdata and exposes
+    create/update/delete operations on elements under that path.
+    """
+    XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
+
+    def __init__(self, dts, log, loop):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return the registration handle"""
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for Nsr op data publisher registration"""
+        self._log.debug("Registering Nsr op data path %s as publisher",
+                        NsrOpDataDtsHandler.XPATH)
+
+        # No on_prepare callback: NO_PREP_READ serves reads from the
+        # published elements directly.
+        hdl = rift.tasklets.DTS.RegistrationHandler()
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ)
+
+    @asyncio.coroutine
+    def create(self, xact, path, msg):
+        """
+        Create an NS record in DTS with the path and message
+        """
+        self._log.debug("Creating NSR xact = %s, %s:%s", xact, path, msg)
+        self.regh.create_element(path, msg)
+        self._log.debug("Created NSR xact = %s, %s:%s", xact, path, msg)
+
+    @asyncio.coroutine
+    def update(self, xact, path, msg, flags=rwdts.XactFlag.REPLACE):
+        """
+        Update an NS record in DTS with the path and message
+
+        flags defaults to REPLACE; callers may pass other XactFlag values.
+        """
+        self._log.debug("Updating NSR xact = %s, %s:%s regh = %s", xact, path, msg, self.regh)
+        self.regh.update_element(path, msg, flags)
+        self._log.debug("Updated NSR xact = %s, %s:%s", xact, path, msg)
+
+    @asyncio.coroutine
+    def delete(self, xact, path):
+        """
+        Delete an NS record in DTS at the given path
+        """
+        self._log.debug("Deleting NSR xact:%s, path:%s", xact, path)
+        self.regh.delete_element(path)
+        self._log.debug("Deleted NSR xact:%s, path:%s", xact, path)
+
+
+
+class VnfrPublisherDtsHandler(object):
+    """ Registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' DTS
+
+    Publisher for VNFR catalog entries; exposes create/update/delete on
+    elements under XPATH.
+    """
+    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+
+    def __init__(self, dts, log, loop):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle"""
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for Vvnfr create/update/delete/read requests from dts """
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            # NOTE(review): this handler is defined but not passed to the
+            # RegistrationHandler below, so it is never invoked — confirm.
+            self._log.debug(
+                "Got vnfr on_prepare callback (xact_info: %s, action: %s): %s",
+                xact_info, action, msg
+                )
+            raise NotImplementedError(
+                "%s action on VirtualNetworkFunctionRecord not supported",
+                action)
+
+        self._log.debug("Registering for VNFR using xpath: %s",
+                        VnfrPublisherDtsHandler.XPATH,)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler()
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=VnfrPublisherDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=(rwdts.Flag.PUBLISHER |
+                                               rwdts.Flag.NO_PREP_READ |
+                                               rwdts.Flag.CACHE),)
+
+    @asyncio.coroutine
+    def create(self, xact, path, msg):
+        """
+        Create a VNFR record in DTS with path and message
+        """
+        self._log.debug("Creating VNFR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.create_element(path, msg)
+        self._log.debug("Created VNFR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def update(self, xact, path, msg):
+        """
+        Update a VNFR record in DTS with path and message
+        """
+        self._log.debug("Updating VNFR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.update_element(path, msg)
+        self._log.debug("Updated VNFR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def delete(self, xact, path):
+        """
+        Delete a VNFR record in DTS at the given path
+        """
+        self._log.debug("Deleting VNFR xact = %s, %s", xact, path)
+        self.regh.delete_element(path)
+        self._log.debug("Deleted VNFR xact = %s, %s", xact, path)
+
+
+class VlrPublisherDtsHandler(object):
+    """ Publishes VLRs at 'D,/vlr:vlr-catalog/vlr:vlr'.
+
+    Registers as a cached DTS publisher and exposes create/update/delete
+    of individual VLR elements through the registration handle.
+    """
+    XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
+
+    def __init__(self, dts, log, loop):
+        # dts:  DTS API handle used for the registration
+        # log:  logger
+        # loop: asyncio event loop (stored, not used directly here)
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        # Registration handle; None until register() is called.
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle"""
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for vlr create/update/delete/read requests from dts """
+
+        # NOTE(review): on_prepare is defined but never passed to
+        # RegistrationHandler below, so it is dead code -- confirm whether it
+        # should be wired in (the NO_PREP_READ flag suggests prepare callbacks
+        # are not expected for reads).
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            self._log.debug(
+                "Got vlr on_prepare callback (xact_info: %s, action: %s): %s",
+                xact_info, action, msg
+                )
+            raise NotImplementedError(
+                "%s action on VirtualLinkRecord not supported",
+                action)
+
+        self._log.debug("Registering for VLR using xpath: %s",
+                        VlrPublisherDtsHandler.XPATH,)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler()
+        with self._dts.group_create() as group:
+            # Publisher with local cache; NO_PREP_READ avoids prepare
+            # callbacks for reads served from the cache.
+            self._regh = group.register(xpath=VlrPublisherDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=(rwdts.Flag.PUBLISHER |
+                                               rwdts.Flag.NO_PREP_READ |
+                                               rwdts.Flag.CACHE),)
+
+    @asyncio.coroutine
+    def create(self, xact, path, msg):
+        """
+        Create a VLR record in DTS with path and message
+
+        xact is only logged; the element is written via the registration
+        handle.
+        """
+        self._log.debug("Creating VLR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.create_element(path, msg)
+        self._log.debug("Created VLR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def update(self, xact, path, msg):
+        """
+        Update a VLR record in DTS with path and message
+
+        xact is only logged; the element is written via the registration
+        handle.
+        """
+        self._log.debug("Updating VLR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.update_element(path, msg)
+        self._log.debug("Updated VLR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def delete(self, xact, path):
+        """
+        Delete a VLR record in DTS at the given path
+
+        xact is only logged; the element is removed via the registration
+        handle.
+        """
+        self._log.debug("Deleting VLR xact = %s, %s", xact, path)
+        self.regh.delete_element(path)
+        self._log.debug("Deleted VLR xact = %s, %s", xact, path)
+
+
+class VnfdPublisher(object):
+    AUTH = ('admin', 'admin')
+    HEADERS = {"content-type": "application/vnd.yang.data+json"}
+
+
+    def __init__(self, use_ssl, ssl_cert, ssl_key, loop):
+        self.use_ssl = use_ssl
+        self.ssl_cert = ssl_cert
+        self.ssl_key = ssl_key
+        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
+        self.loop = loop
+
+    @asyncio.coroutine
+    def update(self, vnfd):
+        def update(vnfd):
+            """
+            Update VNFD record using rest API, as the config data is handled
+            by uAgent and stored in CDB
+            """
+
+            scheme = "https" if self.use_ssl else "http"
+
+            url = "{}://127.0.0.1:8008/api/config/vnfd-catalog/vnfd/{}"
+
+            model = RwYang.Model.create_libncx()
+            model.load_module("rw-vnfd")
+            model.load_module("vnfd")
+
+            data = vnfd.to_json(model)
+
+            key = "vnfd:vnfd-catalog"
+            newdict = json.loads(data)
+            if key in newdict:
+                data = json.dumps(newdict[key])
+
+            options = {"data": data,
+                       "headers": VnfdPublisher.HEADERS,
+                       "auth": VnfdPublisher.AUTH}
+
+            if self.use_ssl:
+                options["verify"] = False
+                options["cert"] = (self.ssl_cert, self.ssl_key)
+
+            response = requests.put(
+                url.format(scheme, vnfd.id),
+                **options
+            )
+
+        status = yield from self.loop.run_in_executor(
+            None,
+            update,
+            vnfd
+            )
+
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py
new file mode 100644
index 0000000..01c0dcb
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py
@@ -0,0 +1,150 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import ncclient
+import ncclient.asyncio_manager
+import re
+import time
+
+import gi
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('NsrYang', '1.0')
+
+from gi.repository import (
+    NsrYang as nsrY,
+    RwYang,
+    RwNsmYang as nsmY,
+    RwDts as rwdts,
+    RwTypes,
+    RwConmanYang as conmanY,
+)
+
+import rift.tasklets
+
+class ROConfigManager(object):
+    def __init__(self, log, loop, dts, parent):
+        self._log = log
+        self._loop = loop
+        self._dts = dts
+        self.nsm = parent
+        self._log.debug("Initialized ROConfigManager")
+
+    def is_ready(self):
+        return True
+
+    @property
+    def cm_state_xpath(self):
+        return ("/rw-conman:cm-state/rw-conman:cm-nsr")
+
+    @classmethod
+    def map_config_status(cls, status):
+        cfg_map = {
+            'init': nsrY.ConfigStates.INIT,
+            'received': nsrY.ConfigStates.CONFIGURING,
+            'cfg_delay': nsrY.ConfigStates.CONFIGURING,
+            'cfg_process': nsrY.ConfigStates.CONFIGURING,
+            'cfg_process-failed': nsrY.ConfigStates.CONFIGURING,
+            'cfg_sched': nsrY.ConfigStates.CONFIGURING,
+            'connecting': nsrY.ConfigStates.CONFIGURING,
+            'failed_connection': nsrY.ConfigStates.CONFIGURING,
+            'netconf_connected': nsrY.ConfigStates.CONFIGURING,
+            'netconf_ssh_connected': nsrY.ConfigStates.CONFIGURING,
+            'restconf_connected': nsrY.ConfigStates.CONFIGURING,
+            'cfg_send': nsrY.ConfigStates.CONFIGURING,
+            'cfg_failed': nsrY.ConfigStates.FAILED,
+            'ready_no_cfg': nsrY.ConfigStates.CONFIG_NOT_NEEDED,
+            'ready': nsrY.ConfigStates.CONFIGURED,
+        }
+
+        return cfg_map[status]
+
+    @asyncio.coroutine
+    def update_ns_cfg_state(self, cm_nsr):
+        if cm_nsr is None:
+            return
+
+        try:
+            nsrid = cm_nsr['id']
+
+            # Update the VNFRs' config status
+            gen = []
+            if 'cm_vnfr' in cm_nsr:
+                gen = (vnfr for vnfr in cm_nsr['cm_vnfr']
+                       if vnfr['id'] in self.nsm._vnfrs)
+
+            for vnfr in gen:
+                vnfrid = vnfr['id']
+                new_status = ROConfigManager.map_config_status(vnfr['state'])
+                self._log.debug("Updating config status of VNFR {} " \
+                                "in NSR {} to {}({})".
+                                format(vnfrid, nsrid, new_status,
+                                       vnfr['state']))
+                yield from \
+                    self.nsm.vnfrs[vnfrid].set_config_status(new_status)
+
+            # Update the NSR's config status
+            new_status = ROConfigManager.map_config_status(cm_nsr['state'])
+            self._log.debug("Updating config status of NSR {} to {}({})".
+                                format(nsrid, new_status, cm_nsr['state']))
+            yield from self.nsm.nsrs[nsrid].set_config_status(new_status, cm_nsr.get('state_details'))
+
+        except Exception as e:
+            self._log.error("Failed to process cm-state for nsr {}: {}".
+                            format(nsrid, e))
+            self._log.exception(e)
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for cm-state changes """
+        
+        @asyncio.coroutine
+        def on_prepare(xact_info, query_action, ks_path, msg):
+            """ cm-state changed """
+
+            #print("###>>> cm-state change ({}), msg_dict = {}".format(query_action, msg_dict))
+            self._log.debug("Received cm-state on_prepare (%s:%s:%s)",
+                            query_action,
+                            ks_path,
+                            msg)
+
+            if (query_action == rwdts.QueryAction.UPDATE or
+                query_action == rwdts.QueryAction.CREATE):
+                # Update Each NSR/VNFR state
+                msg_dict = msg.as_dict()
+                yield from self.update_ns_cfg_state(msg_dict)
+            elif query_action == rwdts.QueryAction.DELETE:
+                self._log.debug("DELETE action in on_prepare for cm-state, ignoring")
+            else:
+                raise NotImplementedError(
+                    "%s on cm-state is not supported",
+                    query_action)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        try:
+            handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+            self.dts_reg_hdl = yield from self._dts.register(self.cm_state_xpath,
+                                                             flags=rwdts.Flag.SUBSCRIBER,
+                                                             handler=handler)
+        except Exception as e:
+            self._log.error("Failed to register for cm-state changes as %s", str(e))
+            
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py
new file mode 100755
index 0000000..ec16259
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py
@@ -0,0 +1,112 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import abc
+
+
+class NsmPluginBase(object):
+    """
+        Abstract base class for the NSM plugin.
+        There will be single instance of this plugin for each plugin type.
+    """
+
+    def __init__(self, dts, log, loop, nsm, plugin_name, dts_publisher):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+        self._plugin_name = plugin_name
+        self._dts_publisher = dts_publisher
+
+    @property
+    def dts(self):
+        return self._dts
+
+    @property
+    def log(self):
+        return self._log
+
+    @property
+    def loop(self):
+        return self._loop
+
+    @property
+    def nsm(self):
+        return self._nsm
+
+    def create_nsr(self, nsr):
+        """ Create an NSR """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def deploy(self, nsr_msg):
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def instantiate_ns(self, nsr, xact):
+        """ Instantiate the network service """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def instantiate_vnf(self, nsr, vnfr):
+        """ Instantiate the virtual network function """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def instantiate_vl(self, nsr, vl):
+        """ Instantiate the virtual link"""
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def get_nsr(self, nsr_path):
+        """ Get the NSR """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def get_vnfr(self, vnfr_path):
+        """ Get the VNFR """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def get_vlr(self, vlr_path):
+        """ Get the VLR """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def terminate_ns(self, nsr):
+        """Terminate the network service """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def terminate_vnf(self, vnfr):
+        """Terminate the VNF """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def terminate_vl(self, vlr):
+        """Terminate the Virtual Link Record"""
+        pass
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
new file mode 100755
index 0000000..de21b5c
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
@@ -0,0 +1,4323 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
+import asyncio
+import ncclient
+import ncclient.asyncio_manager
+import os
+import shutil
+import sys
+import tempfile
+import time
+import uuid
+import yaml
+
+
+from collections import deque
+from collections import defaultdict
+from enum import Enum
+
+import gi
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwNsdYang', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwVlrYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+from gi.repository import (
+    RwYang,
+    RwNsrYang,
+    NsrYang,
+    NsdYang,
+    RwVlrYang,
+    VnfrYang,
+    RwVnfrYang,
+    RwNsmYang,
+    RwsdnYang,
+    RwDts as rwdts,
+    RwTypes,
+    ProtobufC,
+)
+
+import rift.tasklets
+import rift.mano.ncclient
+import rift.mano.config_data.config
+import rift.mano.dts as mano_dts
+
+from . import rwnsm_conman as conman
+from . import cloud
+from . import publisher
+from . import xpath
+from . import config_value_pool
+from . import rwvnffgmgr
+from . import scale_group
+
+
+class NetworkServiceRecordState(Enum):
+    """ Network Service Record State
+
+    Lifecycle states for an NSR, roughly ordered: init -> per-phase
+    instantiation -> running -> scaling -> terminate phases -> terminated,
+    plus FAILED and standalone VL operations.
+    """
+    INIT = 101
+    VL_INIT_PHASE = 102
+    VNF_INIT_PHASE = 103
+    VNFFG_INIT_PHASE = 104
+    # NOTE(review): value 105 is unused (gap before RUNNING) -- confirm
+    # whether this is intentional or a removed state.
+    RUNNING = 106
+    SCALING_OUT = 107
+    SCALING_IN = 108
+    TERMINATE = 109
+    TERMINATE_RCVD = 110
+    VL_TERMINATE_PHASE = 111
+    VNF_TERMINATE_PHASE = 112
+    VNFFG_TERMINATE_PHASE = 113
+    TERMINATED = 114
+    FAILED = 115
+    VL_INSTANTIATE = 116
+    VL_TERMINATE = 117
+
+class NetworkServiceRecordError(Exception):
+    """ Network Service Record Error """
+    pass
+
+
+class NetworkServiceDescriptorError(Exception):
+    """ Network Service Descriptor Error """
+    pass
+
+
+class VirtualNetworkFunctionRecordError(Exception):
+    """ Virtual Network Function Record Error """
+    pass
+
+
+class NetworkServiceDescriptorNotFound(Exception):
+    """ Cannot find Network Service Descriptor"""
+    pass
+
+
+class NetworkServiceDescriptorRefCountExists(Exception):
+    """ Network Service Descriptor reference count exists """
+    pass
+
+
+class NetworkServiceDescriptorUnrefError(Exception):
+    """ Failed to unref a network service descriptor """
+    pass
+
+
+class NsrInstantiationFailed(Exception):
+    """ Failed to instantiate network service """
+    pass
+
+
+class VnfInstantiationFailed(Exception):
+    """ Failed to instantiate virtual network function"""
+    pass
+
+
+class VnffgInstantiationFailed(Exception):
+    """ Failed to instantiate VNF forwarding graph (VNFFG) """
+    pass
+
+
+class VnfDescriptorError(Exception):
+    """ Virtual network function descriptor error """
+    pass
+
+
+class ScalingOperationError(Exception):
+    """ Scaling operation error """
+    pass
+
+
+class ScaleGroupMissingError(Exception):
+    """ Scale group missing error """
+    pass
+
+
+class PlacementGroupError(Exception):
+    """ Placement group error """
+    pass
+
+
+class NsrNsdUpdateError(Exception):
+    """ NSR NSD update error """
+    pass
+
+
+class NsrVlUpdateError(NsrNsdUpdateError):
+    """ NSR virtual link update error """
+    pass
+
+
+class VlRecordState(Enum):
+    """ VL Record State
+
+    Shares the same 101-106 numbering scheme as VnffgRecordState below.
+    """
+    INIT = 101
+    INSTANTIATION_PENDING = 102
+    ACTIVE = 103
+    TERMINATE_PENDING = 104
+    TERMINATED = 105
+    FAILED = 106
+
+
+class VnffgRecordState(Enum):
+    """ VNFFG Record State """
+    INIT = 101
+    INSTANTIATION_PENDING = 102
+    ACTIVE = 103
+    TERMINATE_PENDING = 104
+    TERMINATED = 105
+    FAILED = 106
+
+
+class VnffgRecord(object):
+    """ VNFFG record: tracks one VNF forwarding graph instance of an NSR and
+        drives its lifecycle (create message, instantiate, terminate) via the
+        vnffgmgr. """
+    # Dataplane port advertised for SFF endpoints.
+    SFF_DP_PORT = 4790
+    # Management port advertised for SFFs.
+    SFF_MGMT_PORT = 5000
+    def __init__(self, dts, log, loop, vnffgmgr, nsr, nsr_name, vnffgd_msg, sdn_account_name):
+        # vnffgmgr:   manager used to create/fetch/terminate VNFFGRs
+        # nsr:        owning NSR object (provides nsd_id, vnfrs, fetch_vnfr)
+        # vnffgd_msg: VNFFG descriptor message this record instantiates
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vnffgmgr = vnffgmgr
+        self._nsr = nsr
+        self._nsr_name = nsr_name
+        self._vnffgd_msg = vnffgd_msg
+        if sdn_account_name is None:
+            self._sdn_account_name = ''
+        else:
+            self._sdn_account_name = sdn_account_name
+
+        # Fresh UUID for this record; RSP ids generated later are collected
+        # in _vnffgr_rsp_id.
+        self._vnffgr_id = str(uuid.uuid4())
+        self._vnffgr_rsp_id = list()
+        self._vnffgr_state = VnffgRecordState.INIT
+
+    @property
+    def id(self):
+        """ VNFFGR id """
+        return self._vnffgr_id
+
+    @property
+    def state(self):
+        """ state of this VNFFG record """
+        return self._vnffgr_state
+
+    def fetch_vnffgr(self):
+        """
+        Get VNFFGR message to be published
+
+        INIT and TERMINATED records are synthesized locally; any other state
+        is fetched live from the vnffgmgr, falling back to a 'failed' record
+        (and FAILED state) if the fetch raises.
+        """
+
+        if self._vnffgr_state == VnffgRecordState.INIT:
+            vnffgr_dict = {"id": self._vnffgr_id,
+                           "nsd_id": self._nsr.nsd_id,
+                           "vnffgd_id_ref": self._vnffgd_msg.id,
+                           "vnffgd_name_ref": self._vnffgd_msg.name,
+                           "sdn_account": self._sdn_account_name,
+                           "operational_status": 'init',
+                           }
+            vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+        elif self._vnffgr_state == VnffgRecordState.TERMINATED:
+            vnffgr_dict = {"id": self._vnffgr_id,
+                           "nsd_id": self._nsr.nsd_id,
+                           "vnffgd_id_ref": self._vnffgd_msg.id,
+                           "vnffgd_name_ref": self._vnffgd_msg.name,
+                           "sdn_account": self._sdn_account_name,
+                           "operational_status": 'terminated',
+                           }
+            vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+        else:
+            try:
+                vnffgr = self._vnffgmgr.fetch_vnffgr(self._vnffgr_id)
+            except Exception:
+                self._log.exception("Fetching VNFFGR for VNFFG with id %s failed", self._vnffgr_id)
+                self._vnffgr_state = VnffgRecordState.FAILED
+                vnffgr_dict = {"id": self._vnffgr_id,
+                               "nsd_id": self._nsr.nsd_id,
+                               "vnffgd_id_ref": self._vnffgd_msg.id,
+                               "vnffgd_name_ref": self._vnffgd_msg.name,
+                               "sdn_account": self._sdn_account_name,
+                               "operational_status": 'failed',
+                               }
+                vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+
+        return vnffgr
+
+    @asyncio.coroutine
+    def vnffgr_create_msg(self):
+        """ Build the VNFFGR create message (RSPs and classifiers resolved
+            against this NSR's VNFRs) for handing to the vnffgmgr. """
+        vnffgr_dict = {"id": self._vnffgr_id,
+                       "nsd_id": self._nsr.nsd_id,
+                       "vnffgd_id_ref": self._vnffgd_msg.id,
+                       "vnffgd_name_ref": self._vnffgd_msg.name,
+                       "sdn_account": self._sdn_account_name,
+                    }
+        vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+        # Build one VNFFGR RSP per descriptor RSP, resolving each connection
+        # point reference to a concrete VNFR.
+        for rsp in self._vnffgd_msg.rsp:
+            vnffgr_rsp = vnffgr.rsp.add()
+            vnffgr_rsp.id = str(uuid.uuid4())
+            vnffgr_rsp.name = self._nsr.name + '.' + rsp.name
+            self._vnffgr_rsp_id.append(vnffgr_rsp.id)
+            vnffgr_rsp.vnffgd_rsp_id_ref =  rsp.id
+            vnffgr_rsp.vnffgd_rsp_name_ref = rsp.name
+            for rsp_cp_ref in rsp.vnfd_connection_point_ref:
+                vnfd =  [vnfr.vnfd for vnfr in self._nsr.vnfrs.values() if vnfr.vnfd.id == rsp_cp_ref.vnfd_id_ref]
+                self._log.debug("VNFD message during VNFFG instantiation is %s",vnfd)
+                # VNFDs without a service_function_type cannot be chained.
+                if len(vnfd) > 0 and vnfd[0].has_field('service_function_type'):
+                    self._log.debug("Service Function Type for VNFD ID %s is %s",rsp_cp_ref.vnfd_id_ref, vnfd[0].service_function_type)
+                else:
+                    self._log.error("Service Function Type not available for VNFD ID %s; Skipping in chain",rsp_cp_ref.vnfd_id_ref)
+                    continue
+
+                vnfr_cp_ref =  vnffgr_rsp.vnfr_connection_point_ref.add()
+                vnfr_cp_ref.member_vnf_index_ref = rsp_cp_ref.member_vnf_index_ref
+                vnfr_cp_ref.hop_number = rsp_cp_ref.order
+                vnfr_cp_ref.vnfd_id_ref =rsp_cp_ref.vnfd_id_ref
+                vnfr_cp_ref.service_function_type = vnfd[0].service_function_type
+                # Match the (vnfd id, member index) pair to a concrete VNFR.
+                for nsr_vnfr in self._nsr.vnfrs.values():
+                   if (nsr_vnfr.vnfd.id == vnfr_cp_ref.vnfd_id_ref and
+                      nsr_vnfr.member_vnf_index == vnfr_cp_ref.member_vnf_index_ref):
+                       vnfr_cp_ref.vnfr_id_ref = nsr_vnfr.id
+                       vnfr_cp_ref.vnfr_name_ref = nsr_vnfr.name
+                       vnfr_cp_ref.vnfr_connection_point_ref = rsp_cp_ref.vnfd_connection_point_ref
+
+                       vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
+                       self._log.debug(" Received VNFR is %s", vnfr)
+                       # Poll until the VNFR is running (2s between reads);
+                       # aborts the whole NS instantiation on VNFR failure.
+                       # NOTE(review): no upper bound on this wait -- confirm
+                       # a stuck VNFR cannot hang instantiation forever.
+                       while vnfr.operational_status != 'running':
+                           self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status)
+                           if vnfr.operational_status == 'failed':
+                               self._log.error("Fetching VNFR for  %s failed", vnfr.id)
+                               raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id))
+                           yield from asyncio.sleep(2, loop=self._loop)
+                           vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
+                           self._log.debug("Received VNFR is %s", vnfr)
+
+                       vnfr_cp_ref.connection_point_params.mgmt_address =  vnfr.mgmt_interface.ip_address
+                       for cp in vnfr.connection_point:
+                           if cp.name == vnfr_cp_ref.vnfr_connection_point_ref:
+                               vnfr_cp_ref.connection_point_params.port_id = cp.connection_point_id
+                               vnfr_cp_ref.connection_point_params.name = self._nsr.name + '.' + cp.name
+                               for vdu in vnfr.vdur:
+                                   for ext_intf in vdu.external_interface:
+                                       if ext_intf.name == vnfr_cp_ref.vnfr_connection_point_ref:
+                                           vnfr_cp_ref.connection_point_params.vm_id =  vdu.vim_id
+                                           self._log.debug("VIM ID for CP %s in VNFR %s is %s",cp.name,nsr_vnfr.id,
+                                                            vnfr_cp_ref.connection_point_params.vm_id)
+                                           break
+
+                               vnfr_cp_ref.connection_point_params.address =  cp.ip_address
+                               vnfr_cp_ref.connection_point_params.port = VnffgRecord.SFF_DP_PORT
+
+        # Build classifiers, each bound to the RSP it references.
+        for vnffgd_classifier in self._vnffgd_msg.classifier:
+            _rsp =  [rsp for rsp in vnffgr.rsp if rsp.vnffgd_rsp_id_ref == vnffgd_classifier.rsp_id_ref]
+            if len(_rsp) > 0:
+                rsp_id_ref = _rsp[0].id
+                rsp_name = _rsp[0].name
+            else:
+                self._log.error("RSP with ID %s not found during classifier creation for classifier id %s",vnffgd_classifier.rsp_id_ref,vnffgd_classifier.id)
+                continue
+            vnffgr_classifier = vnffgr.classifier.add()
+            vnffgr_classifier.id = vnffgd_classifier.id
+            vnffgr_classifier.name =  self._nsr.name + '.' + vnffgd_classifier.name
+            _rsp[0].classifier_name = vnffgr_classifier.name
+            vnffgr_classifier.rsp_id_ref = rsp_id_ref
+            vnffgr_classifier.rsp_name = rsp_name
+            for nsr_vnfr in self._nsr.vnfrs.values():
+               if (nsr_vnfr.vnfd.id == vnffgd_classifier.vnfd_id_ref and
+                      nsr_vnfr.member_vnf_index == vnffgd_classifier.member_vnf_index_ref):
+                       vnffgr_classifier.vnfr_id_ref = nsr_vnfr.id
+                       vnffgr_classifier.vnfr_name_ref = nsr_vnfr.name
+                       vnffgr_classifier.vnfr_connection_point_ref = vnffgd_classifier.vnfd_connection_point_ref
+
+                       if nsr_vnfr.vnfd.service_function_chain == 'CLASSIFIER':
+                           vnffgr_classifier.sff_name = nsr_vnfr.name
+
+                       vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
+                       self._log.debug(" Received VNFR is %s", vnfr)
+                       # Same unbounded 2s poll as in the RSP loop above.
+                       while vnfr.operational_status != 'running':
+                           self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status)
+                           if vnfr.operational_status == 'failed':
+                               self._log.error("Fetching VNFR for  %s failed", vnfr.id)
+                               raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id))
+                           yield from asyncio.sleep(2, loop=self._loop)
+                           vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
+                           self._log.debug("Received VNFR is %s", vnfr)
+
+                       for cp in vnfr.connection_point:
+                           if cp.name == vnffgr_classifier.vnfr_connection_point_ref:
+                               vnffgr_classifier.port_id = cp.connection_point_id
+                               vnffgr_classifier.ip_address = cp.ip_address
+                               for vdu in vnfr.vdur:
+                                   for ext_intf in vdu.external_interface:
+                                       if ext_intf.name == vnffgr_classifier.vnfr_connection_point_ref:
+                                           vnffgr_classifier.vm_id =  vdu.vim_id
+                                           # NOTE(review): this debug line logs
+                                           # vnfr_cp_ref.connection_point_params.vm_id,
+                                           # a leftover from the RSP loop above --
+                                           # likely should log vnffgr_classifier.vm_id.
+                                           self._log.debug("VIM ID for CP %s in VNFR %s is %s",cp.name,nsr_vnfr.id,
+                                                            vnfr_cp_ref.connection_point_params.vm_id)
+                                           break
+
+        self._log.info("VNFFGR msg to be sent is %s", vnffgr)
+        return vnffgr
+
+    @asyncio.coroutine
+    def vnffgr_nsr_sff_list(self):
+        """ Build the SFF list for this NSR: one VNFFGSff entry per VNFR
+            whose VNFD declares a CLASSIFIER or SFF role, keyed by VNFD id.
+            SFF entries additionally list all SF-role VNFR names. """
+        sff_list = {}
+        sf_list = [nsr_vnfr.name for nsr_vnfr in self._nsr.vnfrs.values() if nsr_vnfr.vnfd.service_function_chain == 'SF']
+
+        for nsr_vnfr in self._nsr.vnfrs.values():
+            if (nsr_vnfr.vnfd.service_function_chain == 'CLASSIFIER' or nsr_vnfr.vnfd.service_function_chain == 'SFF'):
+                vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
+                self._log.debug(" Received VNFR is %s", vnfr)
+                # Same unbounded 2s poll pattern as vnffgr_create_msg.
+                while vnfr.operational_status != 'running':
+                    self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status)
+                    if vnfr.operational_status == 'failed':
+                       self._log.error("Fetching VNFR for  %s failed", vnfr.id)
+                       raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id))
+                    yield from asyncio.sleep(2, loop=self._loop)
+                    vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
+                    self._log.debug("Received VNFR is %s", vnfr)
+
+                sff =  RwsdnYang.VNFFGSff()
+                sff_list[nsr_vnfr.vnfd.id] = sff
+                sff.name = nsr_vnfr.name
+                sff.function_type = nsr_vnfr.vnfd.service_function_chain
+
+                sff.mgmt_address = vnfr.mgmt_interface.ip_address
+                sff.mgmt_port = VnffgRecord.SFF_MGMT_PORT
+                # Every connection point becomes a dataplane endpoint.
+                for cp in vnfr.connection_point:
+                    sff_dp = sff.dp_endpoints.add()
+                    sff_dp.name = self._nsr.name + '.' + cp.name
+                    sff_dp.address = cp.ip_address
+                    sff_dp.port  = VnffgRecord.SFF_DP_PORT
+                if nsr_vnfr.vnfd.service_function_chain == 'SFF':
+                    for sf_name in sf_list:
+                        _sf = sff.vnfr_list.add()
+                        _sf.vnfr_name = sf_name
+
+        return sff_list
+
+    @asyncio.coroutine
+    def instantiate(self):
+        """ Instantiate this VNFFG
+
+        Builds the create message and SFF list, hands them to the vnffgmgr,
+        then marks the record ACTIVE and triggers an NSR state update.
+        Raises NsrInstantiationFailed (and marks FAILED) on any error.
+        """
+
+        self._log.info("Instaniating VNFFGR with vnffgd %s",
+                       self._vnffgd_msg)
+
+
+        vnffgr_request = yield from self.vnffgr_create_msg()
+        vnffg_sff_list = yield from self.vnffgr_nsr_sff_list()
+
+        try:
+            vnffgr = self._vnffgmgr.create_vnffgr(vnffgr_request,self._vnffgd_msg.classifier,vnffg_sff_list)
+        except Exception as e:
+            self._log.exception("VNFFG instantiation failed: %s", str(e))
+            self._vnffgr_state = VnffgRecordState.FAILED
+            raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFFGR %s failure" % (self.id, vnffgr_request.id))
+
+        # create_vnffgr returned synchronously, so the record moves straight
+        # through INSTANTIATION_PENDING to ACTIVE.
+        self._vnffgr_state = VnffgRecordState.INSTANTIATION_PENDING
+
+        self._log.info("Instantiated VNFFGR :%s", vnffgr)
+        self._vnffgr_state = VnffgRecordState.ACTIVE
+
+        self._log.info("Invoking update_state to update NSR state for NSR ID: %s", self._nsr.id)
+        yield from self._nsr.update_state()
+
+    def vnffgr_in_vnffgrm(self):
+        """ Is there a VNFR record in VNFM
+
+        True when the record is ACTIVE, INSTANTIATION_PENDING or FAILED,
+        i.e. when the vnffgmgr may hold state worth terminating.
+        """
+        if (self._vnffgr_state == VnffgRecordState.ACTIVE or
+                self._vnffgr_state == VnffgRecordState.INSTANTIATION_PENDING or
+                self._vnffgr_state == VnffgRecordState.FAILED):
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def terminate(self):
+        """ Terminate this VNFFGR
+
+        No-op (with an error log) unless the record is in a state the
+        vnffgmgr knows about; otherwise tears it down and marks TERMINATED.
+        """
+        if not self.vnffgr_in_vnffgrm():
+            self._log.error("Ignoring terminate request for id %s in state %s",
+                            self.id, self._vnffgr_state)
+            return
+
+        self._log.info("Terminating VNFFGR id:%s", self.id)
+        self._vnffgr_state = VnffgRecordState.TERMINATE_PENDING
+
+        self._vnffgmgr.terminate_vnffgr(self._vnffgr_id)
+
+        self._vnffgr_state = VnffgRecordState.TERMINATED
+        self._log.debug("Terminated VNFFGR id:%s", self.id)
+
+
class VirtualLinkRecord(object):
    """ Virtual Link Record (VLR) for one VLD instantiated as part of an NSR.

    Tracks the lifecycle of the virtual link (INIT ->
    INSTANTIATION_PENDING -> ACTIVE/FAILED -> TERMINATE_PENDING ->
    TERMINATED) and publishes/deletes the VLR record in VNS over DTS.
    """

    # Base DTS xpath of the VLR catalog.  Required by vlr_xpath(), which
    # previously referenced a non-existent class attribute.
    XPATH = "D,/vlr:vlr-catalog/vlr:vlr"

    @staticmethod
    @asyncio.coroutine
    def create_record(dts, log, loop, nsr_name, vld_msg, cloud_account_name, ip_profile, nsr_id, restart_mode=False):
        """Creates a new VLR object based on the given data.

        If restart mode is enabled, then we look for existing records in the
        DTS and create a VLR record reusing the existing record's ID.

        Arguments:
            dts                - DTS API handle
            log                - logger
            loop               - asyncio event loop
            nsr_name           - name of the owning NSR (used to form the VLR name)
            vld_msg            - VLD descriptor message for this link
            cloud_account_name - VIM account the link is created in
            ip_profile         - optional IP profile applied to the link
            nsr_id             - id of the owning NSR
            restart_mode       - reuse an existing VLR id when True

        Returns:
            VirtualLinkRecord
        """
        vlr_obj = VirtualLinkRecord(
                      dts,
                      log,
                      loop,
                      nsr_name,
                      vld_msg,
                      cloud_account_name,
                      ip_profile,
                      nsr_id,
                      )

        if restart_mode:
            res_iter = yield from dts.query_read(
                              "D,/vlr:vlr-catalog/vlr:vlr",
                              rwdts.XactFlag.MERGE)

            for fut in res_iter:
                response = yield from fut
                vlr = response.result

                # Check if the record is already present; if so reuse the ID
                # of the existing record.  The VLR name is uniquely formed,
                # so it works as the search key.
                if vlr.name == vlr_obj.name:
                    vlr_obj.reset_id(vlr.id)
                    break

        return vlr_obj

    def __init__(self, dts, log, loop, nsr_name, vld_msg, cloud_account_name, ip_profile, nsr_id):
        self._dts = dts
        self._log = log
        self._loop = loop
        self._nsr_name = nsr_name
        self._vld_msg = vld_msg
        self._cloud_account_name = cloud_account_name
        # Filled in from the VNS response once the link becomes ACTIVE.
        self._assigned_subnet = None
        self._nsr_id = nsr_id
        self._ip_profile = ip_profile
        self._vlr_id = str(uuid.uuid4())
        self._state = VlRecordState.INIT
        self._prev_state = None

    @property
    def xpath(self):
        """ Keyed DTS path for this VLR """
        return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self._vlr_id)

    @property
    def id(self):
        """ VLR id """
        return self._vlr_id

    @property
    def nsr_name(self):
        """ Get NSR name for this VL """
        # BUGFIX: previously returned self.nsr_name (the property itself),
        # causing infinite recursion on any access.
        return self._nsr_name

    @property
    def vld_msg(self):
        """ Virtual Link Descriptor """
        return self._vld_msg

    @property
    def assigned_subnet(self):
        """ Subnet assigned to this VL"""
        return self._assigned_subnet

    @property
    def name(self):
        """
        Get the name for this VLR.
        VLR name is "<nsr name>.<VLD name>" unless the VLD carries an
        explicit VIM network name or is the special "multisite" link.
        """
        if self.vld_msg.vim_network_name:
            # An explicit VIM network name always wins.
            return self.vld_msg.vim_network_name
        elif self.vld_msg.name == "multisite":
            # This is a temporary hack to identify manually provisioned inter-site network
            return self.vld_msg.name
        else:
            return self._nsr_name + "." + self.vld_msg.name

    @property
    def cloud_account_name(self):
        """ Cloud account that this VLR should be created in """
        return self._cloud_account_name

    @staticmethod
    def vlr_xpath(vlr):
        """ Get the VLR path from VLR """
        return (VirtualLinkRecord.XPATH + "[vlr:id = '{}']").format(vlr.id)

    @property
    def state(self):
        """ VLR state """
        return self._state

    @state.setter
    def state(self, value):
        """ VLR set state """
        self._state = value

    @property
    def prev_state(self):
        """ VLR previous state """
        return self._prev_state

    @prev_state.setter
    def prev_state(self, value):
        """ VLR set previous state """
        self._prev_state = value

    @property
    def vlr_msg(self):
        """ Virtual Link Record message for Creating VLR in VNS """
        # Descriptor fields copied verbatim into the VLR record.
        vld_fields = ["short_name",
                      "vendor",
                      "description",
                      "version",
                      "type_yang",
                      "vim_network_name",
                      "provider_network"]

        vld_copy_dict = {k: v for k, v in self.vld_msg.as_dict().items()
                         if k in vld_fields}

        vlr_dict = {"id": self._vlr_id,
                    "nsr_id_ref": self._nsr_id,
                    "vld_ref": self.vld_msg.id,
                    "name": self.name,
                    "cloud_account": self.cloud_account_name,
                    }

        if self._ip_profile and self._ip_profile.has_field('ip_profile_params'):
            vlr_dict['ip_profile_params'] = self._ip_profile.ip_profile_params.as_dict()

        vlr_dict.update(vld_copy_dict)
        vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict)
        return vlr

    def reset_id(self, vlr_id):
        """ Adopt an existing VLR id (restart mode). """
        self._vlr_id = vlr_id

    def create_nsr_vlr_msg(self, vnfrs):
        """ Build the NSR opdata VLR entry, cross-referencing the VNFR
        connection points that attach to this link. """
        nsr_vlr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vlr()
        nsr_vlr.vlr_ref = self._vlr_id
        nsr_vlr.assigned_subnet = self.assigned_subnet
        nsr_vlr.cloud_account = self.cloud_account_name

        for conn in self.vld_msg.vnfd_connection_point_ref:
            for vnfr in vnfrs:
                # Match on VNFD id, member index AND cloud account so only
                # connection points in the same VIM are referenced.
                if (vnfr.vnfd.id == conn.vnfd_id_ref and
                        vnfr.member_vnf_index == conn.member_vnf_index_ref and
                        self.cloud_account_name == vnfr.cloud_account_name):
                    cp_entry = nsr_vlr.vnfr_connection_point_ref.add()
                    cp_entry.vnfr_id = vnfr.id
                    cp_entry.connection_point = conn.vnfd_connection_point_ref

        return nsr_vlr

    @asyncio.coroutine
    def instantiate(self):
        """ Instantiate this VL.

        Publishes the VLR to VNS in a DTS transaction and waits for the
        create response.

        Raises:
            NsrInstantiationFailed: when VNS returns no record or the
                link's operational status comes back 'failed'.
        """
        self._log.debug("Instaniating VLR key %s, vld %s",
                        self.xpath, self._vld_msg)
        vlr = None
        self._state = VlRecordState.INSTANTIATION_PENDING
        self._log.debug("Executing VL create path:%s msg:%s",
                        self.xpath, self.vlr_msg)

        with self._dts.transaction(flags=0) as xact:
            block = xact.block_create()
            block.add_query_create(self.xpath, self.vlr_msg)
            self._log.debug("Executing VL create path:%s msg:%s",
                            self.xpath, self.vlr_msg)
            res_iter = yield from block.execute(now=True)
            for ent in res_iter:
                res = yield from ent
                vlr = res.result

            if vlr is None:
                self._state = VlRecordState.FAILED
                raise NsrInstantiationFailed("Failed NS %s instantiation due to empty response" % self.id)

        if vlr.operational_status == 'failed':
            self._log.debug("NS Id:%s VL creation failed for vlr id %s", self.id, vlr.id)
            self._state = VlRecordState.FAILED
            raise NsrInstantiationFailed("Failed VL %s instantiation (%s)" % (vlr.id, vlr.operational_status_details))

        self._log.info("Instantiated VL with xpath %s and vlr:%s",
                       self.xpath, vlr)
        self._state = VlRecordState.ACTIVE
        self._assigned_subnet = vlr.assigned_subnet

    def vlr_in_vns(self):
        """ Is there a VLR record in VNS """
        if (self._state == VlRecordState.ACTIVE or
            self._state == VlRecordState.INSTANTIATION_PENDING or
            self._state == VlRecordState.TERMINATE_PENDING or
            self._state == VlRecordState.FAILED):
            return True

        return False

    @asyncio.coroutine
    def terminate(self):
        """ Terminate this VL """
        if not self.vlr_in_vns():
            # Nothing was published to VNS for this link; nothing to delete.
            self._log.debug("Ignoring terminate request for id %s in state %s",
                            self.id, self._state)
            return

        self._log.debug("Terminating VL id:%s", self.id)
        self._state = VlRecordState.TERMINATE_PENDING

        with self._dts.transaction(flags=0) as xact:
            block = xact.block_create()
            block.add_query_delete(self.xpath)
            yield from block.execute(flags=0, now=True)

        self._state = VlRecordState.TERMINATED
        self._log.debug("Terminated VL id:%s", self.id)
+
+
class VnfRecordState(Enum):
    """ Vnf Record State

    Lifecycle states for a VirtualNetworkFunctionRecord.  Values are in
    the 10x range to keep them distinct from other record-state enums.
    """
    INIT = 101                   # record created, nothing published yet
    INSTANTIATION_PENDING = 102  # create sent to VNFM, awaiting 'running'
    ACTIVE = 103                 # VNFM reports the VNF as running
    TERMINATE_PENDING = 104      # delete sent to VNFM
    TERMINATED = 105             # delete completed
    FAILED = 106                 # instantiation reported 'failed'
+
+
class VirtualNetworkFunctionRecord(object):
    """ Virtual Network Function Record (VNFR).

    Represents one constituent VNF of an NSR: builds the VNFR message,
    publishes it to the VNFM over DTS, and tracks instantiation,
    configuration and termination state.
    """
    # Base DTS xpath of the VNFR catalog; keyed form built by vnfr_xpath().
    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"

    @staticmethod
    @asyncio.coroutine
    def create_record(dts, log, loop, vnfd, const_vnfd_msg, nsd_id, nsr_name,
                cloud_account_name, nsr_id, group_name, group_instance_id,
                placement_groups, restart_mode=False):
        """Creates a new VNFR object based on the given data.

        If restart mode is enabled, then we look for existing records in the
        DTS and create a VNFR record reusing the existing record's ID.

        Returns:
            VirtualNetworkFunctionRecord
        """
        vnfr_obj = VirtualNetworkFunctionRecord(
                          dts,
                          log,
                          loop,
                          vnfd,
                          const_vnfd_msg,
                          nsd_id,
                          nsr_name,
                          cloud_account_name,
                          nsr_id,
                          group_name,
                          group_instance_id,
                          placement_groups,
                          restart_mode=restart_mode)

        if restart_mode:
            res_iter = yield from dts.query_read(
                              "D,/vnfr:vnfr-catalog/vnfr:vnfr",
                              rwdts.XactFlag.MERGE)

            for fut in res_iter:
                response = yield from fut
                vnfr = response.result

                # VNFR names are uniquely formed, so the name can be used
                # as the search key for an existing record.
                if vnfr.name == vnfr_obj.name:
                    vnfr_obj.reset_id(vnfr.id)
                    break

        return vnfr_obj

    def __init__(self,
                 dts,
                 log,
                 loop,
                 vnfd,
                 const_vnfd_msg,
                 nsd_id,
                 nsr_name,
                 cloud_account_name,
                 nsr_id,
                 group_name=None,
                 group_instance_id=None,
                 placement_groups=None,
                 restart_mode=False):
        """ See create_record() for the meaning of the arguments.

        Raises:
            ValueError: when group_instance_id is given without group_name.
        """
        # Validate before doing any work (previously this check ran after
        # config-store side effects had already happened).
        if group_name is None and group_instance_id is not None:
            raise ValueError("Group instance id must not be provided with an empty group name")

        self._dts = dts
        self._log = log
        self._loop = loop
        self._vnfd = vnfd
        self._const_vnfd_msg = const_vnfd_msg
        self._nsd_id = nsd_id
        self._nsr_name = nsr_name
        self._nsr_id = nsr_id
        self._cloud_account_name = cloud_account_name
        self._group_name = group_name
        self._group_instance_id = group_instance_id
        # BUGFIX: default was a shared mutable list ([] in the signature);
        # create a fresh list per instance instead.
        self._placement_groups = placement_groups if placement_groups is not None else []
        self._config_status = NsrYang.ConfigStates.INIT

        self._prev_state = VnfRecordState.INIT
        self._state = VnfRecordState.INIT
        self._state_failed_reason = None

        self.config_store = rift.mano.config_data.config.ConfigStore(self._log)
        self.configure()

        self._vnfr_id = str(uuid.uuid4())
        # Lazily computed by the name property.
        self._name = None
        self._vnfr_msg = self.create_vnfr_msg()
        self._log.debug("Set VNFR {} config type to {}".
                        format(self.name, self.config_type))
        self.restart_mode = restart_mode

    @property
    def id(self):
        """ VNFR id """
        return self._vnfr_id

    @property
    def xpath(self):
        """ VNFR xpath """
        return "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".format(self.id)

    @property
    def vnfr_msg(self):
        """ VNFR message """
        return self._vnfr_msg

    @property
    def const_vnfr_msg(self):
        """ Constituent-VNFR reference message for the NSR opdata """
        return RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConstituentVnfrRef(vnfr_id=self.id,cloud_account=self.cloud_account_name)

    @property
    def vnfd(self):
        """ vnfd """
        return self._vnfd

    @property
    def cloud_account_name(self):
        """ Cloud account that this VNF should be created in """
        return self._cloud_account_name

    @property
    def active(self):
        """ Is this VNF active """
        return self._state == VnfRecordState.ACTIVE

    @property
    def state(self):
        """ state of this VNF """
        return self._state

    @property
    def state_failed_reason(self):
        """ Error message in case this VNF is in failed state """
        return self._state_failed_reason

    @property
    def member_vnf_index(self):
        """ Member VNF index """
        return self._const_vnfd_msg.member_vnf_index

    @property
    def nsr_name(self):
        """ NSR name"""
        return self._nsr_name

    @property
    def name(self):
        """ Name of this VNFR.

        Formed as "<nsr>[__<group>[__<instance>]]__<vnfd>__<index>" and
        cached after the first computation.
        """
        if self._name is not None:
            return self._name

        name_tags = [self._nsr_name]

        if self._group_name is not None:
            name_tags.append(self._group_name)

        if self._group_instance_id is not None:
            name_tags.append(str(self._group_instance_id))

        name_tags.extend([self.vnfd.name, str(self.member_vnf_index)])

        self._name = "__".join(name_tags)

        return self._name

    @staticmethod
    def vnfr_xpath(vnfr):
        """ Get the VNFR path from VNFR """
        return (VirtualNetworkFunctionRecord.XPATH + "[vnfr:id = '{}']").format(vnfr.id)

    @property
    def config_type(self):
        """ Configuration method declared in the VNFD, or 'none'. """
        cfg_types = ['netconf', 'juju', 'script']
        for method in cfg_types:
            if self._vnfd.vnf_configuration.has_field(method):
                return method
        return 'none'

    @property
    def config_status(self):
        """Return the config status as YANG ENUM string"""
        self._log.debug("Map VNFR {} config status {} ({})".
                        format(self.name, self._config_status, self.config_type))
        if self.config_type == 'none':
            return 'config_not_needed'
        elif self._config_status == NsrYang.ConfigStates.CONFIGURED:
            return 'configured'
        elif self._config_status == NsrYang.ConfigStates.FAILED:
            return 'failed'

        return 'configuring'

    def set_state(self, state):
        """ set the state of this object """
        self._prev_state = self._state
        self._state = state

    def reset_id(self, vnfr_id):
        """ Adopt an existing VNFR id (restart mode) and rebuild the msg. """
        self._vnfr_id = vnfr_id
        self._vnfr_msg = self.create_vnfr_msg()

    def configure(self):
        """ Merge VNFD-level initial config into the config store. """
        self.config_store.merge_vnfd_config(
                    self._nsd_id,
                    self._vnfd,
                    self.member_vnf_index,
                    )

    def create_vnfr_msg(self):
        """ VNFR message for this VNFR """
        # Descriptor fields copied verbatim into the VNFR record.
        vnfd_fields = [
                "short_name",
                "vendor",
                "description",
                "version",
                "type_yang",
                ]
        vnfd_copy_dict = {k: v for k, v in self._vnfd.as_dict().items() if k in vnfd_fields}
        vnfr_dict = {
                "id": self.id,
                "nsr_id_ref": self._nsr_id,
                "vnfd_ref": self.vnfd.id,
                "name": self.name,
                "cloud_account": self._cloud_account_name,
                "config_status": self.config_status
                }
        vnfr_dict.update(vnfd_copy_dict)

        vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
        vnfr.member_vnf_index_ref = self.member_vnf_index
        vnfr.vnf_configuration.from_dict(self._vnfd.vnf_configuration.as_dict())

        if self._vnfd.mgmt_interface.has_field("port"):
            vnfr.mgmt_interface.port = self._vnfd.mgmt_interface.port

        for group_info in self._placement_groups:
            group = vnfr.placement_groups_info.add()
            group.from_dict(group_info.as_dict())

        # UI expects the monitoring param field to exist
        vnfr.monitoring_param = []

        self._log.debug("Get vnfr_msg for VNFR {} : {}".format(self.name, vnfr))
        return vnfr

    @asyncio.coroutine
    def update_vnfm(self):
        """ Push the current VNFR message to the VNFM over DTS. """
        self._log.debug("Send an update to VNFM for VNFR {} with {}".
                        format(self.name, self.vnfr_msg))
        yield from self._dts.query_update(
                self.xpath,
                rwdts.XactFlag.TRACE,
                self.vnfr_msg
                )

    def get_config_status(self):
        """Return the config status as YANG ENUM"""
        return self._config_status

    @asyncio.coroutine
    def set_config_status(self, status):
        """ Update the VNFR config state and propagate it to the VNFM.

        No-op when the VNFR is already CONFIGURED or the status is
        unchanged.  The update is not pushed while still in INIT, since
        the VNFM may not have created the VNFR yet.
        """

        def status_to_string(status):
            """ Map an NsrYang.ConfigStates value to its yang enum string. """
            status_dc = {
                NsrYang.ConfigStates.INIT : 'init',
                NsrYang.ConfigStates.CONFIGURING : 'configuring',
                NsrYang.ConfigStates.CONFIG_NOT_NEEDED : 'config_not_needed',
                NsrYang.ConfigStates.CONFIGURED : 'configured',
                NsrYang.ConfigStates.FAILED : 'failed',
            }

            return status_dc[status]

        self._log.debug("Update VNFR {} from {} ({}) to {}".
                        format(self.name, self._config_status,
                               self.config_type, status))
        if self._config_status == NsrYang.ConfigStates.CONFIGURED:
            self._log.error("Updating already configured VNFR {}".
                            format(self.name))
            return

        if self._config_status != status:
            try:
                self._config_status = status
                # Keep the cached VNFR message in sync; the message field
                # stores the yang string form, not the enum value.
                self.vnfr_msg.config_status = status_to_string(status)
            except Exception as e:
                self._log.error("Exception=%s", str(e))
                pass

            self._log.debug("Updated VNFR {} status to {}".format(self.name, status))

            if self._config_status != NsrYang.ConfigStates.INIT:
                try:
                    # Publish only after VNFM has the VNFR created
                    yield from self.update_vnfm()
                except Exception as e:
                    self._log.error("Exception updating VNFM with new status {} of VNFR {}: {}".
                                format(status, self.name, e))
                    self._log.exception(e)

    def is_configured(self):
        """ True when no config is needed or config has completed. """
        if self.config_type == 'none':
            return True

        if self._config_status == NsrYang.ConfigStates.CONFIGURED:
            return True

        return False

    @asyncio.coroutine
    def instantiate(self, nsr):
        """ Instantiate this VNFR.

        Resolves each VNFD connection point to its NSR-level VLR and
        publishes (or, in restart mode, updates) the VNFR to the VNFM.
        """

        self._log.debug("Instaniating VNFR key %s, vnfd %s",
                        self.xpath, self._vnfd)

        self._log.debug("Create VNF with xpath %s and vnfr %s",
                        self.xpath, self.vnfr_msg)

        self.set_state(VnfRecordState.INSTANTIATION_PENDING)

        def find_vlr_for_cp(conn):
            """ Find VLR for the given connection point """
            for vlr in nsr.vlrs:
                for vnfd_cp in vlr.vld_msg.vnfd_connection_point_ref:
                    if (vnfd_cp.vnfd_id_ref == self._vnfd.id and
                            vnfd_cp.vnfd_connection_point_ref == conn.name and
                            vnfd_cp.member_vnf_index_ref == self.member_vnf_index and
                             vlr.cloud_account_name == self.cloud_account_name):
                        self._log.debug("Found VLR for cp_name:%s and vnf-index:%d",
                                        conn.name, self.member_vnf_index)
                        return vlr
            return None

        # For every connection point in the VNFD fill in the identifier
        for conn_p in self._vnfd.connection_point:
            cpr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint()
            cpr.name = conn_p.name
            cpr.type_yang = conn_p.type_yang
            vlr_ref = find_vlr_for_cp(conn_p)
            if vlr_ref is None:
                # Deliberately tolerated: an unresolved CP is skipped
                # instead of failing the whole VNF instantiation.
                msg = "Failed to find VLR for cp = %s" % conn_p.name
                self._log.debug("%s", msg)
#                raise VirtualNetworkFunctionRecordError(msg)
                continue

            cpr.vlr_ref = vlr_ref.id
            self.vnfr_msg.connection_point.append(cpr)
            self._log.debug("Connection point [%s] added, vnf id=%s vnfd id=%s",
                            cpr, self.vnfr_msg.id, self.vnfr_msg.vnfd_ref)

        if not self.restart_mode:
            yield from self._dts.query_create(self.xpath,
                                              0,   # this is sub
                                              self.vnfr_msg)
        else:
            # Restart: the record already exists in DTS, so update it.
            yield from self._dts.query_update(self.xpath,
                                              0,
                                              self.vnfr_msg)

        self._log.info("Created VNF with xpath %s and vnfr %s",
                       self.xpath, self.vnfr_msg)

        self._log.info("Instantiated VNFR with xpath %s and vnfd %s, vnfr %s",
                       self.xpath, self._vnfd, self.vnfr_msg)

    @asyncio.coroutine
    def update_state(self, vnfr_msg):
        """ Update this VNFR from a VNFM-published operational status. """
        if vnfr_msg.operational_status == "running":
            # Only transition once; repeated 'running' updates are no-ops.
            if self.vnfr_msg.operational_status != "running":
                yield from self.is_active()
        elif vnfr_msg.operational_status == "failed":
            yield from self.instantiation_failed(failed_reason=vnfr_msg.operational_status_details)

    @asyncio.coroutine
    def is_active(self):
        """ This VNFR is active """
        self._log.debug("VNFR %s is active", self._vnfr_id)
        self.set_state(VnfRecordState.ACTIVE)

    @asyncio.coroutine
    def instantiation_failed(self, failed_reason=None):
        """ This VNFR instantiation failed"""
        self._log.error("VNFR %s instantiation failed", self._vnfr_id)
        self.set_state(VnfRecordState.FAILED)
        self._state_failed_reason = failed_reason

    def vnfr_in_vnfm(self):
        """ Is there a VNFR record in VNFM """
        if (self._state == VnfRecordState.ACTIVE or
                self._state == VnfRecordState.INSTANTIATION_PENDING or
                self._state == VnfRecordState.FAILED):
            return True

        return False

    @asyncio.coroutine
    def terminate(self):
        """ Terminate this VNF """
        if not self.vnfr_in_vnfm():
            # Nothing was published to the VNFM; nothing to delete.
            self._log.debug("Ignoring terminate request for id %s in state %s",
                            self.id, self._state)
            return

        self._log.debug("Terminating VNF id:%s", self.id)
        self.set_state(VnfRecordState.TERMINATE_PENDING)
        with self._dts.transaction(flags=0) as xact:
            block = xact.block_create()
            block.add_query_delete(self.xpath)
            yield from block.execute(flags=0)
        self.set_state(VnfRecordState.TERMINATED)
        self._log.debug("Terminated VNF id:%s", self.id)
+
+
class NetworkServiceStatus(object):
    """ Tracks an NSR's operational state plus a bounded event history. """

    # Upper bound on retained events; older entries are dropped FIFO.
    MAX_EVENTS_RECORDED = 10

    def __init__(self, dts, log, loop):
        self._dts = dts
        self._log = log
        self._loop = loop

        self._state = NetworkServiceRecordState.INIT
        self._events = deque([])

    @asyncio.coroutine
    def create_notification(self, evt, evt_desc, evt_details):
        """ Publish an nsm-notification for the given event over DTS. """
        xp = "N,/rw-nsr:nsm-notification"
        notif = RwNsrYang.YangNotif_RwNsr_NsmNotification()
        notif.event = evt
        notif.description = evt_desc
        notif.details = evt_details

        yield from self._dts.query_create(xp, rwdts.XactFlag.ADVISE, notif)
        self._log.info("Notification called by creating dts query: %s", notif)

    def record_event(self, evt, evt_desc, evt_details):
        """ Append an event to the history and publish a notification. """
        self._log.debug("Recording event - evt %s, evt_descr %s len = %s",
                        evt, evt_desc, len(self._events))
        # Keep the history bounded: discard the oldest entry when full.
        if len(self._events) >= NetworkServiceStatus.MAX_EVENTS_RECORDED:
            self._events.popleft()
        stamped = (int(time.time()), evt, evt_desc, evt_details)
        self._events.append(stamped)

        self._loop.create_task(self.create_notification(evt, evt_desc, evt_details))

    def set_state(self, state):
        """ Overwrite the tracked state. """
        self._state = state

    def yang_str(self):
        """ Map the internal state enum to its yang enum string. """
        state_to_str_map = {"INIT": "init",
                            "VL_INIT_PHASE": "vl_init_phase",
                            "VNF_INIT_PHASE": "vnf_init_phase",
                            "VNFFG_INIT_PHASE": "vnffg_init_phase",
                            "SCALING_GROUP_INIT_PHASE": "scaling_group_init_phase",
                            "RUNNING": "running",
                            "SCALING_OUT": "scaling_out",
                            "SCALING_IN": "scaling_in",
                            "TERMINATE_RCVD": "terminate_rcvd",
                            "TERMINATE": "terminate",
                            "VL_TERMINATE_PHASE": "vl_terminate_phase",
                            "VNF_TERMINATE_PHASE": "vnf_terminate_phase",
                            "VNFFG_TERMINATE_PHASE": "vnffg_terminate_phase",
                            "TERMINATED": "terminated",
                            "FAILED": "failed",
                            "VL_INSTANTIATE": "vl_instantiate",
                            "VL_TERMINATE": "vl_terminate",
        }
        return state_to_str_map[self._state.name]

    @property
    def state(self):
        """ Currently tracked state. """
        return self._state

    @property
    def msg(self):
        """ Event history rendered as NSR operational-event messages. """
        rendered = []
        for idx, entry in enumerate(self._events, start=1):
            event = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_OperationalEvents()
            event.id = idx
            event.timestamp, event.event, event.description, event.details = entry
            rendered.append(event)
        return rendered
+
+
+class NetworkServiceRecord(object):
+    """ Network service record """
+    XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
+
    def __init__(self, dts, log, loop, nsm, nsm_plugin, nsr_cfg_msg, sdn_account_name, restart_mode=False):
        """ Build an NSR from its config message.

        Arguments:
            dts              - DTS API handle
            log              - logger
            loop             - asyncio event loop
            nsm              - owning NS manager
            nsm_plugin       - orchestration plugin used to instantiate this NS
            nsr_cfg_msg      - the ns-instance-config message for this NSR
            sdn_account_name - SDN account name used for VNFFG handling
            restart_mode     - when True, reuse existing DTS records
        """
        self._dts = dts
        self._log = log
        self._loop = loop
        self._nsm = nsm
        self._nsr_cfg_msg = nsr_cfg_msg
        self._nsm_plugin = nsm_plugin
        self._sdn_account_name = sdn_account_name

        # Child records and lookup tables, populated during instantiation.
        self._nsd = None
        self._nsr_msg = None
        self._nsr_regh = None
        self._vlrs = []
        self._vnfrs = {}
        self._vnfds = {}
        self._vnffgrs = {}
        self._param_pools = {}
        self._scaling_groups = {}
        self._create_time = int(time.time())
        self._op_status = NetworkServiceStatus(dts, log, loop)
        self._config_status = NsrYang.ConfigStates.CONFIGURING
        self._config_status_details = None
        self._job_id = 0
        self.restart_mode = restart_mode
        self.config_store = rift.mano.config_data.config.ConfigStore(self._log)
        self._debug_running = False
        self._is_active = False
        # Phase-completion flags, flipped by set_state() when the NSR
        # leaves the corresponding init phase.
        self._vl_phase_completed = False
        self._vnf_phase_completed = False


        # Initialise the state to INIT.
        # The NSR moves through the following transitions:
        # 1. INIT -> VLS_READY once all the VLs in the NSD are created
        # 2. VLS_READY -> VNFS_READY when all the VNFs in the NSD are created
        # 3. VNFS_READY -> READY when the NSR is published

        self.set_state(NetworkServiceRecordState.INIT)

        self.substitute_input_parameters = InputParameterSubstitution(self._log)
+
    @property
    def nsm_plugin(self):
        """ NSM Plugin """
        return self._nsm_plugin

    def set_state(self, state):
        """ Set state for this NSR"""
        self._log.debug("Setting state to %s", state)
        # Leaving an init phase (to the next phase or to FAILED) marks
        # that phase completed; the flags are consulted elsewhere.
        if self.state == NetworkServiceRecordState.VL_INIT_PHASE:
            self._vl_phase_completed = True

        if self.state == NetworkServiceRecordState.VNF_INIT_PHASE:
            self._vnf_phase_completed = True

        self._op_status.set_state(state)

    @property
    def id(self):
        """ Get id for this NSR"""
        return self._nsr_cfg_msg.id

    @property
    def name(self):
        """ Name of this network service record """
        return self._nsr_cfg_msg.name

    @property
    def cloud_account_name(self):
        """ Default cloud account for this NSR (from config). """
        return self._nsr_cfg_msg.cloud_account

    @property
    def state(self):
        """State of this NetworkServiceRecord"""
        return self._op_status.state

    @property
    def active(self):
        """ Is this NSR active ?"""
        return True if self._op_status.state == NetworkServiceRecordState.RUNNING else False

    @property
    def vlrs(self):
        """ VLRs associated with this NSR"""
        return self._vlrs

    @property
    def vnfrs(self):
        """ VNFRs associated with this NSR"""
        return self._vnfrs

    @property
    def vnffgrs(self):
        """ VNFFGRs associated with this NSR"""
        return self._vnffgrs

    @property
    def scaling_groups(self):
        """ Scaling groups associated with this NSR """
        return self._scaling_groups

    @property
    def param_pools(self):
        """ Parameter value pools associated with this NSR"""
        return self._param_pools

    @property
    def nsr_cfg_msg(self):
        """ The ns-instance-config message this NSR was built from. """
        return self._nsr_cfg_msg

    @nsr_cfg_msg.setter
    def nsr_cfg_msg(self, msg):
        """ Replace the config message (e.g. on config update). """
        self._nsr_cfg_msg = msg

    @property
    def nsd_msg(self):
        """ NSD Protobuf for this NSR """
        # Lazily cached from the config message on first access.
        if self._nsd is not None:
            return self._nsd
        self._nsd = self._nsr_cfg_msg.nsd
        return self._nsd

    @property
    def nsd_id(self):
        """ NSD ID for this NSR """
        return self.nsd_msg.id

    @property
    def job_id(self):
        ''' Get a new job id for config primitive'''
        # NOTE: property with a side effect — every read increments the
        # counter and returns a fresh id.
        self._job_id += 1
        return self._job_id

    @property
    def config_status(self):
        """ Config status for NSR """
        return self._config_status
+
+    def resolve_placement_group_cloud_construct(self, input_group):
+        """
+        Returns the cloud specific construct for placement group
+        """
+        copy_dict = ['name', 'requirement', 'strategy']
+
+        for group_info in self._nsr_cfg_msg.nsd_placement_group_maps:
+            if group_info.placement_group_ref == input_group.name:
+                group = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_PlacementGroupsInfo()
+                group_dict = {k:v for k,v in
+                              group_info.as_dict().items() if k != 'placement_group_ref'}
+                for param in copy_dict:
+                    group_dict.update({param: getattr(input_group, param)})
+                group.from_dict(group_dict)
+                return group
+        return None
+
+
+    def __str__(self):
+        return "NSR(name={}, nsd_id={}, cloud_account={})".format(
+                self.name, self.nsd_id, self.cloud_account_name
+                )
+
    def _get_vnfd(self, vnfd_id, config_xact):
        """  Fetch vnfd msg for the passed vnfd id """
        # Delegates to the NS manager, which owns the VNFD catalog lookup.
        return self._nsm.get_vnfd(vnfd_id, config_xact)
+
+    def _get_vnfd_cloud_account(self, vnfd_member_index):
+        """  Fetch Cloud Account for the passed vnfd id """
+        if self._nsr_cfg_msg.vnf_cloud_account_map:
+           vim_accounts = [vnf.cloud_account  for vnf in self._nsr_cfg_msg.vnf_cloud_account_map \
+                           if vnfd_member_index == vnf.member_vnf_index_ref]
+           if vim_accounts and vim_accounts[0]:
+               return vim_accounts[0]
+        return self.cloud_account_name
+
+    def _get_constituent_vnfd_msg(self, vnf_index):
+        for const_vnfd in self.nsd_msg.constituent_vnfd:
+            if const_vnfd.member_vnf_index == vnf_index:
+                return const_vnfd
+
+        raise ValueError("Constituent VNF index %s not found" % vnf_index)
+
    def record_event(self, evt, evt_desc, evt_details=None, state=None):
        """ Record an event

        Args:
            evt: short event name
            evt_desc: human-readable description of the event
            evt_details: optional extra detail text
            state: optional state to transition this NSR to after recording
        """
        self._op_status.record_event(evt, evt_desc, evt_details)
        if state is not None:
            self.set_state(state)
+
+    def scaling_trigger_str(self, trigger):
+        SCALING_TRIGGER_STRS = {
+            NsdYang.ScalingTrigger.PRE_SCALE_IN : 'pre-scale-in',
+            NsdYang.ScalingTrigger.POST_SCALE_IN : 'post-scale-in',
+            NsdYang.ScalingTrigger.PRE_SCALE_OUT : 'pre-scale-out',
+            NsdYang.ScalingTrigger.POST_SCALE_OUT : 'post-scale-out',
+        }
+        try:
+            return SCALING_TRIGGER_STRS[trigger]
+        except Exception as e:
+            self._log.error("Scaling trigger mapping error for {} : {}".
+                            format(trigger, e))
+            self._log.exception(e)
+            return "Unknown trigger"
+
+    @asyncio.coroutine
+    def instantiate_vls(self):
+        """
+        This function instantiates VLs for every VL in this Network Service
+        """
+        self._log.debug("Instantiating %d VLs in NSD id %s", len(self._vlrs),
+                        self.id)
+        for vlr in self._vlrs:
+            yield from self.nsm_plugin.instantiate_vl(self, vlr)
+            vlr.state = VlRecordState.ACTIVE
+
    @asyncio.coroutine
    def create(self, config_xact):
        """ Create this network service

        Builds (but does not instantiate) every constituent record of the
        NS: VLRs, VNFRs, VNFFGRs, scaling groups and parameter pools.

        Args:
            config_xact: DTS config transaction used to look up VNFDs
        """
        # Create virtual links  for all the external vnf
        # connection points in this NS
        yield from self.create_vls()

        # Create VNFs in this network service
        yield from self.create_vnfs(config_xact)

        # Create VNFFG for network service
        self.create_vnffgs()

        # Create Scaling Groups for each scaling group in NSD
        self.create_scaling_groups()

        # Create Parameter Pools
        self.create_param_pools()
+
+    @asyncio.coroutine
+    def apply_scale_group_config_script(self, script, group, scale_instance, trigger, vnfrs=None):
+        """ Apply config based on script for scale group """
+
+        @asyncio.coroutine
+        def add_vnfrs_data(vnfrs_list):
+            """ Add as a dict each of the VNFRs data """
+            vnfrs_data = []
+            for vnfr in vnfrs_list:
+                self._log.debug("Add VNFR {} data".format(vnfr))
+                vnfr_data = dict()
+                vnfr_data['name'] = vnfr.name
+                if trigger in [NsdYang.ScalingTrigger.PRE_SCALE_IN, NsdYang.ScalingTrigger.POST_SCALE_OUT]:
+                    # Get VNF management and other IPs, etc
+                    opdata = yield from self.fetch_vnfr(vnfr.xpath)
+                    self._log.debug("VNFR {} op data: {}".format(vnfr.name, opdata))
+                    try:
+                        vnfr_data['rw_mgmt_ip'] = opdata.mgmt_interface.ip_address
+                        vnfr_data['rw_mgmt_port'] = opdata.mgmt_interface.port
+                    except Exception as e:
+                        self._log.error("Unable to get management IP for vnfr {}:{}".
+                                        format(vnfr.name, e))
+
+                    try:
+                        vnfr_data['connection_points'] = []
+                        for cp in opdata.connection_point:
+                            con_pt = dict()
+                            con_pt['name'] = cp.name
+                            con_pt['ip_address'] = cp.ip_address
+                            vnfr_data['connection_points'].append(con_pt)
+                    except Exception as e:
+                        self._log.error("Exception getting connections points for VNFR {}: {}".
+                                        format(vnfr.name, e))
+
+                vnfrs_data.append(vnfr_data)
+                self._log.debug("VNFRs data: {}".format(vnfrs_data))
+
+            return vnfrs_data
+
+        def add_nsr_data(nsr):
+            nsr_data = dict()
+            nsr_data['name'] = nsr.name
+            return nsr_data
+
+        if script is None or len(script) == 0:
+            self._log.error("Script not provided for scale group config: {}".format(group.name))
+            return False
+
+        if script[0] == '/':
+            path = script
+        else:
+            path = os.path.join(os.environ['RIFT_INSTALL'], "usr/bin", script)
+        if not os.path.exists(path):
+            self._log.error("Config faled for scale group {}: Script does not exist at {}".
+                            format(group.name, path))
+            return False
+
+        # Build a YAML file with all parameters for the script to execute
+        # The data consists of 5 sections
+        # 1. Trigger
+        # 2. Scale group config
+        # 3. VNFRs in the scale group
+        # 4. VNFRs outside scale group
+        # 5. NSR data
+        data = dict()
+        data['trigger'] = group.trigger_map(trigger)
+        data['config'] = group.group_msg.as_dict()
+
+        if vnfrs:
+            data["vnfrs_in_group"] = yield from add_vnfrs_data(vnfrs)
+        else:
+            data["vnfrs_in_group"] = yield from add_vnfrs_data(scale_instance.vnfrs)
+
+        data["vnfrs_others"] = yield from add_vnfrs_data(self.vnfrs.values())
+        data["nsr"] = add_nsr_data(self)
+
+        tmp_file = None
+        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
+            tmp_file.write(yaml.dump(data, default_flow_style=True)
+                    .encode("UTF-8"))
+
+        self._log.debug("Creating a temp file: {} with input data: {}".
+                        format(tmp_file.name, data))
+
+        cmd = "{} {}".format(path, tmp_file.name)
+        self._log.debug("Running the CMD: {}".format(cmd))
+        proc = yield from asyncio.create_subprocess_shell(cmd, loop=self._loop)
+        rc = yield from proc.wait()
+        if rc:
+            self._log.error("The script {} for scale group {} config returned: {}".
+                            format(script, group.name, rc))
+            return False
+
+        # Success
+        return True
+
+
    @asyncio.coroutine
    def apply_scaling_group_config(self, trigger, group, scale_instance, vnfrs=None):
        """ Apply the config for the scaling group based on trigger

        Looks up the trigger's config in the scaling group, resolves the
        referenced NS service primitive and runs its user-defined script.
        Updates the scale instance's config_status as a side effect.

        Returns:
            True on success (or when the trigger has no config), False
            on failure.
        Raises:
            ValueError: referenced service primitive not found in the NSD
            NotImplementedError: primitive has no user-defined script
        """
        if group is None or scale_instance is None:
            return False

        @asyncio.coroutine
        def update_config_status(success=True, err_msg=None):
            # Small state machine over scale_instance.config_status:
            # failed is terminal; configured can only degrade to failed;
            # configuring is promoted/demoted only on POST_SCALE_OUT.
            self._log.debug("Update %s config status to %r : %s",
                            scale_instance, success, err_msg)
            if (scale_instance.config_status == "failed"):
                # Do not update the config status if it is already in failed state
                return

            if scale_instance.config_status == "configured":
                # Update only to failed state an already configured scale instance
                if not success:
                    scale_instance.config_status = "failed"
                    scale_instance.config_err_msg = err_msg
                    yield from self.update_state()
            else:
                # We are in configuring state
                # Only after post scale out mark instance as configured
                if trigger == NsdYang.ScalingTrigger.POST_SCALE_OUT:
                    if success:
                        scale_instance.config_status = "configured"
                    else:
                        scale_instance.config_status = "failed"
                        scale_instance.config_err_msg = err_msg
                    yield from self.update_state()

        config = group.trigger_config(trigger)
        if config is None:
            # Nothing configured for this trigger -- treated as success.
            return True

        self._log.debug("Scaling group {} config: {}".format(group.name, config))
        if config.has_field("ns_config_primitive_name_ref"):
            config_name = config.ns_config_primitive_name_ref
            nsd_msg = self.nsd_msg
            config_primitive = None
            for ns_cfg_prim in nsd_msg.service_primitive:
                if ns_cfg_prim.name == config_name:
                    config_primitive = ns_cfg_prim
                    break

            if config_primitive is None:
                raise ValueError("Could not find ns_cfg_prim %s in nsr %s" % (config_name, self.name))

            self._log.debug("Scaling group {} config primitive: {}".format(group.name, config_primitive))
            if config_primitive.has_field("user_defined_script"):
                rc = yield from self.apply_scale_group_config_script(config_primitive.user_defined_script,
                                                                     group, scale_instance, trigger, vnfrs)
                err_msg = None
                if not rc:
                    err_msg = "Failed config for trigger {} using config script '{}'". \
                              format(self.scaling_trigger_str(trigger),
                                     config_primitive.user_defined_script)
                yield from update_config_status(success=rc, err_msg=err_msg)
                return rc
            else:
                # Only script-based configuration is supported; record the
                # failure before raising.
                err_msg = "Failed config for trigger {} as config script is not specified". \
                          format(self.scaling_trigger_str(trigger))
                yield from update_config_status(success=False, err_msg=err_msg)
                raise NotImplementedError("Only script based config support for scale group for now: {}".
                                          format(group.name))
        else:
            err_msg = "Failed config for trigger {} as config primitive is not specified".\
                      format(self.scaling_trigger_str(trigger))
            yield from update_config_status(success=False, err_msg=err_msg)
            self._log.error("Config primitive not specified for config action in scale group %s" %
                            (group.name))
        return False
+
    def create_scaling_groups(self):
        """ This function creates a NSScalingGroup for every scaling
        group defined in the NSD, keyed by group name."""

        for scaling_group_msg in self.nsd_msg.scaling_group_descriptor:
            self._log.debug("Found scaling_group %s in nsr id %s",
                            scaling_group_msg.name, self.id)

            group_record = scale_group.ScalingGroup(
                    self._log,
                    scaling_group_msg
                    )

            self._scaling_groups[group_record.name] = group_record
+
+    @asyncio.coroutine
+    def create_scale_group_instance(self, group_name, index, config_xact, is_default=False):
+        group = self._scaling_groups[group_name]
+        scale_instance = group.create_instance(index, is_default)
+
+        @asyncio.coroutine
+        def create_vnfs():
+            self._log.debug("Creating %u VNFs associated with NS id %s scaling group %s",
+                            len(self.nsd_msg.constituent_vnfd), self.id, self)
+
+            vnfrs = []
+            for vnf_index, count in group.vnf_index_count_map.items():
+                const_vnfd_msg = self._get_constituent_vnfd_msg(vnf_index)
+                vnfd_msg = self._get_vnfd(const_vnfd_msg.vnfd_id_ref, config_xact)
+
+                cloud_account_name = self._get_vnfd_cloud_account(const_vnfd_msg.member_vnf_index)
+                if cloud_account_name is None:
+                    cloud_account_name = self.cloud_account_name
+                for _ in range(count):
+                    vnfr = yield from self.create_vnf_record(vnfd_msg, const_vnfd_msg, cloud_account_name, group_name, index)
+                    scale_instance.add_vnfr(vnfr)
+                    vnfrs.append(vnfr)
+
+            return vnfrs
+
+        @asyncio.coroutine
+        def instantiate_instance():
+            self._log.debug("Creating %s VNFRS", scale_instance)
+            vnfrs = yield from create_vnfs()
+            yield from self.publish()
+
+            self._log.debug("Instantiating %s VNFRS for %s", len(vnfrs), scale_instance)
+            scale_instance.operational_status = "vnf_init_phase"
+            yield from self.update_state()
+
+            try:
+                rc = yield from self.apply_scaling_group_config(NsdYang.ScalingTrigger.PRE_SCALE_OUT,
+                                                                group, scale_instance, vnfrs)
+                if not rc:
+                    self._log.error("Pre scale out config for scale group {} ({}) failed".
+                                    format(group.name, index))
+                    scale_instance.operational_status = "failed"
+                else:
+                    yield from self.instantiate_vnfs(vnfrs)
+
+            except Exception as e:
+                self._log.exception("Failed to begin instantiatiation of vnfs for scale group {}: {}".
+                                    format(group.name, e))
+                self._log.exception(e)
+                scale_instance.operational_status = "failed"
+
+            yield from self.update_state()
+
+        yield from instantiate_instance()
+
    @asyncio.coroutine
    def delete_scale_group_instance(self, group_name, index):
        """ Terminate one instance of a scaling group.

        Runs the PRE_SCALE_IN config (failures there are logged but do not
        abort the teardown), terminates the instance's VNFRs and removes
        the instance from the group.

        Raises:
            ScalingOperationError: attempting to delete a default instance
        """
        group = self._scaling_groups[group_name]
        scale_instance = group.get_instance(index)
        if scale_instance.is_default:
            raise ScalingOperationError("Cannot terminate a default scaling group instance")

        scale_instance.operational_status = "terminate"
        yield from self.update_state()

        @asyncio.coroutine
        def terminate_instance():
            self._log.debug("Terminating %s VNFRS" % scale_instance)
            rc = yield from self.apply_scaling_group_config(NsdYang.ScalingTrigger.PRE_SCALE_IN,
                                                            group, scale_instance)
            if not rc:
                self._log.error("Pre scale in config for scale group {} ({}) failed".
                                format(group.name, index))

            # Going ahead with terminate, even if there is an error in pre-scale-in config
            # as this could be result of scale out failure and we need to cleanup this group
            yield from self.terminate_vnfrs(scale_instance.vnfrs)
            group.delete_instance(index)

            # NOTE(review): the status is set after delete_instance() --
            # confirm the instance object is still observed at this point.
            scale_instance.operational_status = "vnf_terminate_phase"
            yield from self.update_state()

        yield from terminate_instance()
+
    @asyncio.coroutine
    def _update_scale_group_instances_status(self):
        """ Advance the state of every scaling-group instance based on the
        aggregate state of its VNFRs.

        - vnf_init_phase: all VNFRs ACTIVE -> "running" and schedule the
          POST_SCALE_OUT config task; any VNFR FAILED -> "failed".
        - vnf_terminate_phase: all VNFRs TERMINATED -> "terminated" and
          run the POST_SCALE_IN config inline.
        """
        @asyncio.coroutine
        def post_scale_out_task(group, instance):
            # Apply post scale out config once all VNFRs are active
            rc = yield from self.apply_scaling_group_config(NsdYang.ScalingTrigger.POST_SCALE_OUT,
                                                            group, instance)
            instance.operational_status = "running"
            if rc:
                self._log.debug("Scale out for group {} and instance {} succeeded".
                                format(group.name, instance.instance_id))
            else:
                self._log.error("Post scale out config for scale group {} ({}) failed".
                                format(group.name, instance.instance_id))

            yield from self.update_state()

        # Snapshot the instances per group before iterating, so config
        # callbacks cannot mutate the dict we are walking.
        group_instances = {group: group.instances for group in self._scaling_groups.values()}
        for group, instances in group_instances.items():
            self._log.debug("Updating %s instance status", group)
            for instance in instances:
                instance_vnf_state_list = [vnfr.state for vnfr in instance.vnfrs]
                self._log.debug("Got vnfr instance states: %s", instance_vnf_state_list)
                if instance.operational_status == "vnf_init_phase":
                    if all([state == VnfRecordState.ACTIVE for state in instance_vnf_state_list]):
                        instance.operational_status = "running"

                        # Create a task for post scale out to allow us to sleep before attempting
                        # to configure newly created VM's
                        self._loop.create_task(post_scale_out_task(group, instance))

                    elif any([state == VnfRecordState.FAILED for state in instance_vnf_state_list]):
                        self._log.debug("Scale out for group {} and instance {} failed".
                                        format(group.name, instance.instance_id))
                        instance.operational_status = "failed"

                elif instance.operational_status == "vnf_terminate_phase":
                    if all([state == VnfRecordState.TERMINATED for state in instance_vnf_state_list]):
                        instance.operational_status = "terminated"
                        rc = yield from self.apply_scaling_group_config(NsdYang.ScalingTrigger.POST_SCALE_IN,
                                                                         group, instance)
                        if rc:
                            self._log.debug("Scale in for group {} and instance {} succeeded".
                                            format(group.name, instance.instance_id))
                        else:
                            self._log.error("Post scale in config for scale group {} ({}) failed".
                                            format(group.name, instance.instance_id))
+
    def create_vnffgs(self):
        """ This function creates VNFFGs for every VNFFG in the NSD
        associated with this NSR, registering each record by its id."""

        for vnffgd in self.nsd_msg.vnffgd:
            self._log.debug("Found vnffgd %s in nsr id %s", vnffgd, self.id)
            vnffgr = VnffgRecord(self._dts,
                                 self._log,
                                 self._loop,
                                 self._nsm._vnffgmgr,
                                 self,
                                 self.name,
                                 vnffgd,
                                 self._sdn_account_name
                                 )
            self._vnffgrs[vnffgr.id] = vnffgr
+
+    def resolve_vld_ip_profile(self, nsd_msg, vld):
+        if not vld.has_field('ip_profile_ref'):
+            return None
+        profile = [ profile for profile in nsd_msg.ip_profiles if profile.name == vld.ip_profile_ref ]
+        return profile[0] if profile else None
+
    @asyncio.coroutine
    def _create_vls(self, vld, cloud_account):
        """Create a VLR in the cloud account specified using the given VLD.

        Resolves the VLD's ip-profile (if any) from the NSD and passes the
        NSR's restart_mode through to the record factory.

        Args:
            vld : VLD yang obj
            cloud_account : Cloud account name

        Returns:
            VirtualLinkRecord
        """
        vlr = yield from VirtualLinkRecord.create_record(
                self._dts,
                self._log,
                self._loop,
                self.name,
                vld,
                cloud_account,
                self.resolve_vld_ip_profile(self.nsd_msg, vld),
                self.id,
                restart_mode=self.restart_mode)

        return vlr
+
+    def _extract_cloud_accounts_for_vl(self, vld):
+        """
+        Extracts the list of cloud accounts from the NS Config obj
+
+        Rules:
+        1. Cloud accounts based connection point (vnf_cloud_account_map)
+        Args:
+            vld : VLD yang object
+
+        Returns:
+            TYPE: Description
+        """
+        cloud_account_list = []
+
+        if self._nsr_cfg_msg.vnf_cloud_account_map:
+            # Handle case where cloud_account is None
+            vnf_cloud_map = {}
+            for vnf in self._nsr_cfg_msg.vnf_cloud_account_map:
+                if vnf.cloud_account is not None:
+                    vnf_cloud_map[vnf.member_vnf_index_ref] = vnf.cloud_account
+
+            for vnfc in vld.vnfd_connection_point_ref:
+                cloud_account = vnf_cloud_map.get(
+                        vnfc.member_vnf_index_ref,
+                        self.cloud_account_name)
+
+                cloud_account_list.append(cloud_account)
+
+        if self._nsr_cfg_msg.vl_cloud_account_map:
+            for vld_map in self._nsr_cfg_msg.vl_cloud_account_map:
+                if vld_map.vld_id_ref == vld.id:
+                    cloud_account_list.extend(vld_map.cloud_accounts)
+
+        # If no config has been provided then fall-back to the default
+        # account
+        if not cloud_account_list:
+            cloud_account_list = [self.cloud_account_name]
+
+        self._log.debug("VL {} cloud accounts: {}".
+                        format(vld.name, cloud_account_list))
+        return set(cloud_account_list)
+
    @asyncio.coroutine
    def create_vls(self):
        """ This function creates VLs for every VLD in the NSD
        associated with this NSR -- one VLR per (VLD, cloud account) pair."""
        for vld in self.nsd_msg.vld:
            self._log.debug("Found vld %s in nsr id %s", vld, self.id)
            cloud_account_list = self._extract_cloud_accounts_for_vl(vld)
            for account in cloud_account_list:
                vlr = yield from self._create_vls(vld, account)
                self._vlrs.append(vlr)
+
+
+    @asyncio.coroutine
+    def create_vl_instance(self, vld):
+        self._log.debug("Create VL for {}: {}".format(self.id, vld.as_dict()))
+        # Check if the VL is already present
+        vlr = None
+        for vl in self._vlrs:
+            if vl.vld_msg.id == vld.id:
+                self._log.debug("The VLD %s already in NSR %s as VLR %s with status %s",
+                                vld.id, self.id, vl.id, vl.state)
+                vlr = vl
+                if vlr.state != VlRecordState.TERMINATED:
+                    err_msg = "VLR for VL %s in NSR %s already instantiated", \
+                              vld, self.id
+                    self._log.error(err_msg)
+                    raise NsrVlUpdateError(err_msg)
+                break
+
+        if vlr is None:
+            cloud_account_list = self._extract_cloud_accounts_for_vl(vld)
+            for account in cloud_account_list:
+                vlr = yield from self._create_vls(vld, account)
+                self._vlrs.append(vlr)
+
+        vlr.state = VlRecordState.INSTANTIATION_PENDING
+        yield from self.update_state()
+
+        try:
+            yield from self.nsm_plugin.instantiate_vl(self, vlr)
+            vlr.state = VlRecordState.ACTIVE
+
+        except Exception as e:
+            err_msg = "Error instantiating VL for NSR {} and VLD {}: {}". \
+                      format(self.id, vld.id, e)
+            self._log.error(err_msg)
+            self._log.exception(e)
+            vlr.state = VlRecordState.FAILED
+
+        yield from self.update_state()
+
    @asyncio.coroutine
    def delete_vl_instance(self, vld):
        """ Terminate and remove the VLR created from the given VLD.

        Only the first matching VLR is processed (loop breaks after it).
        A failed termination leaves the VLR in the list with state FAILED.
        """
        for vlr in self._vlrs:
            if vlr.vld_msg.id == vld.id:
                self._log.debug("Found VLR %s for VLD %s in NSR %s",
                                vlr.id, vld.id, self.id)
                vlr.state = VlRecordState.TERMINATE_PENDING
                yield from self.update_state()

                try:
                    yield from self.nsm_plugin.terminate_vl(vlr)
                    vlr.state = VlRecordState.TERMINATED
                    self._vlrs.remove(vlr)

                except Exception as e:
                    err_msg = "Error terminating VL for NSR {} and VLD {}: {}". \
                              format(self.id, vld.id, e)
                    self._log.error(err_msg)
                    self._log.exception(e)
                    vlr.state = VlRecordState.FAILED

                yield from self.update_state()
                break
+
    @asyncio.coroutine
    def create_vnfs(self, config_xact):
        """
        This function creates VNFs for every VNF in the NSD
        associated with this NSR.

        Constituent VNFs with start_by_default set to False are skipped.

        Args:
            config_xact: DTS config transaction used for VNFD lookups
        """
        self._log.debug("Creating %u VNFs associated with this NS id %s",
                        len(self.nsd_msg.constituent_vnfd), self.id)

        for const_vnfd in self.nsd_msg.constituent_vnfd:
            if not const_vnfd.start_by_default:
                self._log.debug("start_by_default set to False in constituent VNF (%s). Skipping start.",
                                const_vnfd.member_vnf_index)
                continue

            vnfd_msg = self._get_vnfd(const_vnfd.vnfd_id_ref, config_xact)
            cloud_account_name = self._get_vnfd_cloud_account(const_vnfd.member_vnf_index)
            if cloud_account_name is None:
                cloud_account_name = self.cloud_account_name
            yield from self.create_vnf_record(vnfd_msg, const_vnfd, cloud_account_name)
+
+
+    def get_placement_groups(self, vnfd_msg, const_vnfd):
+        placement_groups = []
+        for group in self.nsd_msg.placement_groups:
+            for member_vnfd in group.member_vnfd:
+                if (member_vnfd.vnfd_id_ref == vnfd_msg.id) and \
+                   (member_vnfd.member_vnf_index_ref == const_vnfd.member_vnf_index):
+                    group_info = self.resolve_placement_group_cloud_construct(group)
+                    if group_info is None:
+                        self._log.error("Could not resolve cloud-construct for placement group: %s", group.name)
+                        ### raise PlacementGroupError("Could not resolve cloud-construct for placement group: {}".format(group.name))
+                    else:
+                        self._log.info("Successfully resolved cloud construct for placement group: %s for VNF: %s (Member Index: %s)",
+                                       str(group_info),
+                                       vnfd_msg.name,
+                                       const_vnfd.member_vnf_index)
+                        placement_groups.append(group_info)
+        return placement_groups
+
+    @asyncio.coroutine
+    def create_vnf_record(self, vnfd_msg, const_vnfd, cloud_account_name, group_name=None, group_instance_id=None):
+        # Fetch the VNFD associated with this VNF
+        placement_groups = self.get_placement_groups(vnfd_msg, const_vnfd)
+        self._log.info("Cloud Account for VNF %d is %s",const_vnfd.member_vnf_index,cloud_account_name)
+        self._log.info("Launching VNF: %s (Member Index: %s) in NSD plancement Groups: %s",
+                       vnfd_msg.name,
+                       const_vnfd.member_vnf_index,
+                       [ group.name for group in placement_groups])
+        vnfr = yield from VirtualNetworkFunctionRecord.create_record(self._dts,
+                                            self._log,
+                                            self._loop,
+                                            vnfd_msg,
+                                            const_vnfd,
+                                            self.nsd_id,
+                                            self.name,
+                                            cloud_account_name,
+                                            self.id,
+                                            group_name,
+                                            group_instance_id,
+                                            placement_groups,
+                                            restart_mode=self.restart_mode,
+                                            )
+        if vnfr.id in self._vnfrs:
+            err = "VNF with VNFR id %s already in vnf list" % (vnfr.id,)
+            raise NetworkServiceRecordError(err)
+
+        self._vnfrs[vnfr.id] = vnfr
+        self._nsm.vnfrs[vnfr.id] = vnfr
+
+        yield from vnfr.set_config_status(NsrYang.ConfigStates.INIT)
+
+        self._log.debug("Added VNFR %s to NSM VNFR list with id %s",
+                        vnfr.name,
+                        vnfr.id)
+
+        return vnfr
+
+    def create_param_pools(self):
+        for param_pool in self.nsd_msg.parameter_pool:
+            self._log.debug("Found parameter pool %s in nsr id %s", param_pool, self.id)
+
+            start_value = param_pool.range.start_value
+            end_value = param_pool.range.end_value
+            if end_value < start_value:
+                raise NetworkServiceRecordError(
+                        "Parameter pool %s has invalid range (start: {}, end: {})".format(
+                            start_value, end_value
+                            )
+                        )
+
+            self._param_pools[param_pool.name] = config_value_pool.ParameterValuePool(
+                    self._log,
+                    param_pool.name,
+                    range(start_value, end_value)
+                    )
+
    @asyncio.coroutine
    def fetch_vnfr(self, vnfr_path):
        """ Fetch VNFR record

        Reads the operational VNFR at the given xpath via DTS. Iterates
        the full result set and returns the last result (or None when the
        query yields nothing).
        """
        vnfr = None
        self._log.debug("Fetching VNFR with key %s while instantiating %s",
                        vnfr_path, self.id)
        res_iter = yield from self._dts.query_read(vnfr_path, rwdts.XactFlag.MERGE)

        for ent in res_iter:
            res = yield from ent
            vnfr = res.result

        return vnfr
+
    @asyncio.coroutine
    def instantiate_vnfs(self, vnfrs):
        """
        This function instantiates VNFs for every VNF in this Network Service

        Args:
            vnfrs: iterable of VNFR objects to hand to the NSM plugin
        """
        self._log.debug("Instantiating %u VNFs in NS %s", len(vnfrs), self.id)
        for vnf in vnfrs:
            self._log.debug("Instantiating VNF: %s in NS %s", vnf, self.id)
            yield from self.nsm_plugin.instantiate_vnf(self, vnf)
+
    @asyncio.coroutine
    def instantiate_vnffgs(self):
        """
        This function instantiates VNFFGs for every VNFFG in this Network Service

        Waits (polling every 2s) for all VNFRs to leave the INIT /
        INSTANTIATION_PENDING states; aborts if any VNFR is not ACTIVE.
        """
        self._log.debug("Instantiating %u VNFFGs in NS %s",
                        len(self.nsd_msg.vnffgd), self.id)
        for _, vnfr in self.vnfrs.items():
            # Busy-wait poll; there is no timeout, so a VNFR stuck in a
            # pending state blocks this coroutine indefinitely.
            while vnfr.state in [VnfRecordState.INSTANTIATION_PENDING, VnfRecordState.INIT]:
                self._log.debug("Received vnfr state for vnfr %s is %s; retrying",vnfr.name,vnfr.state)
                yield from asyncio.sleep(2, loop=self._loop)
            if vnfr.state == VnfRecordState.ACTIVE:
                self._log.debug("Received vnfr state for vnfr %s is %s ",vnfr.name,vnfr.state)
                continue
            else:
                self._log.debug("Received vnfr state for vnfr %s is %s; failing vnffg creation",vnfr.name,vnfr.state)
                # NOTE(review): this sets _vnffgr_state on the NSR itself,
                # although that attribute otherwise appears to belong to
                # VnffgRecord -- confirm this is intended.
                self._vnffgr_state = VnffgRecordState.FAILED
                return

        # NOTE(review): hard-coded settle delay before orchestration.
        self._log.info("Waiting for 90 seconds for VMs to come up")
        yield from asyncio.sleep(90, loop=self._loop)
        self._log.info("Starting VNFFG orchestration")
        for vnffg in self._vnffgrs.values():
            self._log.debug("Instantiating VNFFG: %s in NS %s", vnffg, self.id)
            yield from vnffg.instantiate()
+
+    @asyncio.coroutine
+    def instantiate_scaling_instances(self, config_xact):
+        """ Instantiate any default scaling instances in this Network Service
+
+        For every scaling group: first create the default instances implied by
+        the group's min-instance-count (ids 0..min-1), then re-create any
+        instances explicitly listed for that group in the NSR config (used on
+        reload/recovery, hence is_default=False).
+        """
+        for group in self._scaling_groups.values():
+            for i in range(group.min_instance_count):
+                self._log.debug("Instantiating %s default scaling instance %s", group, i)
+                yield from self.create_scale_group_instance(
+                        group.name, i, config_xact, is_default=True
+                        )
+
+            # Scan the NSR config for instances belonging to this group only;
+            # entries referencing other groups are handled on their iteration.
+            for group_msg in self._nsr_cfg_msg.scaling_group:
+                if group_msg.scaling_group_name_ref != group.name:
+                    continue
+
+                for instance in group_msg.instance:
+                    self._log.debug("Reloading %s scaling instance %s", group_msg, instance.id)
+                    yield from self.create_scale_group_instance(
+                            group.name, instance.id, config_xact, is_default=False
+                            )
+
+    def has_scaling_instances(self):
+        """ Return boolean indicating if the network service has default scaling groups """
+        for group in self._scaling_groups.values():
+            if group.min_instance_count > 0:
+                return True
+
+        for group_msg in self._nsr_cfg_msg.scaling_group:
+            if len(group_msg.instance) > 0:
+                return True
+
+        return False
+
+    @asyncio.coroutine
+    def publish(self):
+        """ This function publishes this NSR
+
+        Rebuilds the NSR op-data message from current state and writes it to
+        DTS via the NSM's nsr handler inside a fresh transaction.
+        """
+        # Regenerate the op-data message so it reflects the latest state.
+        self._nsr_msg = self.create_msg()
+
+        self._log.debug("Publishing the NSR with xpath %s and nsr %s",
+                        self.nsr_xpath,
+                        self._nsr_msg)
+
+        # _debug_running latches True the first time we publish in RUNNING
+        # state; subsequent publishes just log it (debug aid).
+        if self._debug_running:
+            self._log.debug("Publishing NSR in RUNNING state!")
+            #raise()
+
+        with self._dts.transaction() as xact:
+            yield from self._nsm.nsr_handler.update(xact, self.nsr_xpath, self._nsr_msg)
+            if self._op_status.state == NetworkServiceRecordState.RUNNING:
+                self._debug_running = True
+
+    @asyncio.coroutine
+    def unpublish(self, xact):
+        """ Unpublish this NSR object
+
+        Removes the NSR op-data from DTS using the caller-supplied transaction.
+        """
+        self._log.debug("Unpublishing Network service id %s", self.id)
+        yield from self._nsm.nsr_handler.delete(xact, self.nsr_xpath)
+
+    @property
+    def nsr_xpath(self):
+        """ Returns the xpath associated with this NSR """
+        return(
+            "D,/nsr:ns-instance-opdata" +
+            "/nsr:nsr[nsr:ns-instance-config-ref = '{}']"
+            ).format(self.id)
+
+    @staticmethod
+    def xpath_from_nsr(nsr):
+        """ Returns the xpath associated with this NSR  op data"""
+        return (NetworkServiceRecord.XPATH +
+                "[nsr:ns-instance-config-ref = '{}']").format(nsr.id)
+
+    @property
+    def nsd_xpath(self):
+        """ Return NSD config xpath."""
+        return(
+            "C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}']"
+            ).format(self.nsd_id)
+
+    @asyncio.coroutine
+    def instantiate(self, config_xact):
+        """Instantiates a NetworkServiceRecord.
+
+        This function instantiates a Network service
+        which involves the following steps,
+
+        * Instantiate every VL in NSD by sending create VLR request to DTS.
+        * Instantiate every VNF in NSD by sending create VNF request to DTS.
+        * Publish the NSR details to DTS
+
+        Note: this coroutine returns after scheduling do_instantiate() on the
+        event loop; instantiation continues asynchronously and failures are
+        reported through on_instantiate_done / instantiation_failed.
+
+        Arguments:
+            nsr:  The NSR configuration request containing nsr-id and nsd
+            config_xact: The configuration transaction which initiated the instantiation
+
+        Raises:
+            NetworkServiceRecordError if the NSR creation fails
+
+        Returns:
+            No return value
+        """
+
+        self._log.debug("Instantiating NS - %s xact - %s", self, config_xact)
+
+        # Move the state to INIT
+        self.set_state(NetworkServiceRecordState.INIT)
+
+        event_descr = "Instantiation Request Received NSR Id:%s" % self.id
+        self.record_event("instantiating", event_descr)
+
+        # Find the NSD (carried inline inside the NSR config message)
+        self._nsd = self._nsr_cfg_msg.nsd
+
+        try:
+            # Update ref count if nsd present in catalog
+            self._nsm.get_nsd_ref(self.nsd_id)
+
+        except NetworkServiceDescriptorError:
+            # This could be an NSD not in the nsd-catalog
+            pass
+
+        # Merge any config and initial config primitive values
+        self.config_store.merge_nsd_config(self.nsd_msg)
+        self._log.debug("Merged NSD: {}".format(self.nsd_msg.as_dict()))
+
+        event_descr = "Fetched NSD with descriptor id %s" % self.nsd_id
+        self.record_event("nsd-fetched", event_descr)
+
+        # Bail out if the NSR config carried no descriptor at all.
+        if self._nsd is None:
+            msg = "Failed to fetch NSD with nsd-id [%s] for nsr-id %s"
+            self._log.debug(msg, self.nsd_id, self.id)
+            raise NetworkServiceRecordError(self)
+
+        self._log.debug("Got nsd result %s", self._nsd)
+
+        # Substitute any input parameters
+        self.substitute_input_parameters(self._nsd, self._nsr_cfg_msg)
+
+        # Create the record
+        yield from self.create(config_xact)
+
+        # Publish the NSR to DTS
+        yield from self.publish()
+
+        @asyncio.coroutine
+        def do_instantiate():
+            """
+                Instantiate network service
+
+                Runs the full bring-up sequence as a background task:
+                VLs -> VNFs -> VNFFGs -> scaling groups -> plugin deploy,
+                publishing the NSR op-data after each major phase.
+            """
+            self._log.debug("Instantiating VLs nsr id [%s] nsd id [%s]",
+                            self.id, self.nsd_id)
+
+            # instantiate the VLs
+            event_descr = ("Instantiating %s external VLs for NSR id %s" %
+                           (len(self.nsd_msg.vld), self.id))
+            self.record_event("begin-external-vls-instantiation", event_descr)
+
+            self.set_state(NetworkServiceRecordState.VL_INIT_PHASE)
+
+            yield from self.instantiate_vls()
+
+            # Publish the NSR to DTS
+            yield from self.publish()
+
+            event_descr = ("Finished instantiating %s external VLs for NSR id %s" %
+                           (len(self.nsd_msg.vld), self.id))
+            self.record_event("end-external-vls-instantiation", event_descr)
+
+            self.set_state(NetworkServiceRecordState.VNF_INIT_PHASE)
+
+            self._log.debug("Instantiating VNFs  ...... nsr[%s], nsd[%s]",
+                            self.id, self.nsd_id)
+
+            # instantiate the VNFs
+            event_descr = ("Instantiating %s VNFS for NSR id %s" %
+                           (len(self.nsd_msg.constituent_vnfd), self.id))
+
+            self.record_event("begin-vnf-instantiation", event_descr)
+
+            yield from self.instantiate_vnfs(self._vnfrs.values())
+
+            self._log.debug(" Finished instantiating %d VNFs for NSR id %s",
+                            len(self.nsd_msg.constituent_vnfd), self.id)
+
+            event_descr = ("Finished instantiating %s VNFs for NSR id %s" %
+                           (len(self.nsd_msg.constituent_vnfd), self.id))
+            self.record_event("end-vnf-instantiation", event_descr)
+
+            # VNFFGs are only instantiated when the NS actually defines some.
+            if len(self.vnffgrs) > 0:
+                #self.set_state(NetworkServiceRecordState.VNFFG_INIT_PHASE)
+                event_descr = ("Instantiating %s VNFFGS for NSR id %s" %
+                               (len(self.nsd_msg.vnffgd), self.id))
+
+                self.record_event("begin-vnffg-instantiation", event_descr)
+
+                yield from self.instantiate_vnffgs()
+
+                event_descr = ("Finished instantiating %s VNFFGDs for NSR id %s" %
+                               (len(self.nsd_msg.vnffgd), self.id))
+                self.record_event("end-vnffg-instantiation", event_descr)
+
+            if self.has_scaling_instances():
+                event_descr = ("Instantiating %s Scaling Groups for NSR id %s" %
+                               (len(self._scaling_groups), self.id))
+
+                self.record_event("begin-scaling-group-instantiation", event_descr)
+                yield from self.instantiate_scaling_instances(config_xact)
+                self.record_event("end-scaling-group-instantiation", event_descr)
+
+            # Give the plugin a chance to deploy the network service now that all
+            # virtual links and vnfs are instantiated
+            yield from self.nsm_plugin.deploy(self._nsr_msg)
+
+            self._log.debug("Publishing  NSR...... nsr[%s], nsd[%s]",
+                            self.id, self.nsd_id)
+
+            # Publish the NSR to DTS
+            yield from self.publish()
+
+            self._log.debug("Published  NSR...... nsr[%s], nsd[%s]",
+                            self.id, self.nsd_id)
+
+        def on_instantiate_done(fut):
+            # If the do_instantiate fails, then publish NSR with failed result
+            if fut.exception() is not None:
+                self._log.error("NSR instantiation failed for NSR id %s: %s", self.id, str(fut.exception()))
+                self._loop.create_task(self.instantiation_failed(failed_reason=str(fut.exception())))
+
+        # Fire-and-forget: run the bring-up sequence in the background and
+        # route any failure through on_instantiate_done.
+        instantiate_task = self._loop.create_task(do_instantiate())
+        instantiate_task.add_done_callback(on_instantiate_done)
+
+    @asyncio.coroutine
+    def set_config_status(self, status, status_details=None):
+        """ Update the NS config status and republish the NSR.
+
+        Arguments:
+            status         - new NsrYang.ConfigStates value
+            status_details - optional human-readable detail string
+
+        No-op when the status is unchanged; a transition to FAILED is also
+        recorded as a "config-failed" operational event.
+        """
+        if self.config_status != status:
+            self._log.debug("Updating NSR {} status for {} to {}".
+                            format(self.name, self.config_status, status))
+            self._config_status = status
+            self._config_status_details = status_details
+
+            if self._config_status == NsrYang.ConfigStates.FAILED:
+                self.record_event("config-failed", "NS configuration failed",
+                        evt_details=self._config_status_details)
+
+            yield from self.publish()
+
+    @asyncio.coroutine
+    def is_active(self):
+        """ This NS is active
+
+        Marks the NS RUNNING, records the event and publishes the NSR.
+        Idempotent after the first call (guarded by _is_active).
+        """
+        # NOTE(review): the state is set to RUNNING before the _is_active
+        # guard below, so repeat calls still refresh the op-state --
+        # confirm this ordering is intentional.
+        self.set_state(NetworkServiceRecordState.RUNNING)
+        if self._is_active:
+            return
+
+        # Publish the NSR to DTS
+        self._log.debug("Network service %s is active ", self.id)
+        self._is_active = True
+
+        event_descr = "NSR in running state for NSR id %s" % self.id
+        self.record_event("ns-running", event_descr)
+
+        yield from self.publish()
+
+    @asyncio.coroutine
+    def instantiation_failed(self, failed_reason=None):
+        """ The NS instantiation failed
+
+        Moves the NS to FAILED, records an "ns-failed" event carrying the
+        optional failure reason, and republishes the NSR op-data.
+        """
+        self._log.error("Network service id:%s, name:%s instantiation failed",
+                        self.id, self.name)
+        self.set_state(NetworkServiceRecordState.FAILED)
+
+        event_descr = "Instantiation of NS %s failed" % self.id
+        self.record_event("ns-failed", event_descr, evt_details=failed_reason)
+
+        # Publish the NSR to DTS
+        yield from self.publish()
+
+    @asyncio.coroutine
+    def terminate_vnfrs(self, vnfrs):
+        """ Terminate VNFRS in this network service """
+        self._log.debug("Terminating VNFs in network service %s", self.id)
+        for vnfr in vnfrs:
+            yield from self.nsm_plugin.terminate_vnf(vnfr)
+
+    @asyncio.coroutine
+    def terminate(self):
+        """ Terminate a NetworkServiceRecord.
+
+        Tears the NS down in reverse order of bring-up: VNFFGRs first, then
+        VNFRs, then VLRs, finally letting the plugin clean up the NS itself.
+        Each phase transition is recorded as an operational event.
+        """
+        def terminate_vnffgrs():
+            """ Terminate VNFFGRS in this network service """
+            self._log.debug("Terminating VNFFGRs in network service %s", self.id)
+            for vnffgr in self.vnffgrs.values():
+                yield from vnffgr.terminate()
+
+        def terminate_vlrs():
+            """ Terminate VLRs in this netork service """
+            self._log.debug("Terminating VLs in network service %s", self.id)
+            for vlr in self.vlrs:
+                yield from self.nsm_plugin.terminate_vl(vlr)
+                vlr.state = VlRecordState.TERMINATED
+
+        self._log.debug("Terminating network service id %s", self.id)
+
+        # Move the state to TERMINATE
+        self.set_state(NetworkServiceRecordState.TERMINATE)
+        event_descr = "Terminate being processed for NS Id:%s" % self.id
+        self.record_event("terminate", event_descr)
+
+        # Move the state to VNFFG_TERMINATE_PHASE
+        self._log.debug("Terminating VNFFGs in NS ID: %s", self.id)
+        self.set_state(NetworkServiceRecordState.VNFFG_TERMINATE_PHASE)
+        event_descr = "Terminating VNFFGS in NS Id:%s" % self.id
+        self.record_event("terminating-vnffgss", event_descr)
+        yield from terminate_vnffgrs()
+
+        # Move the state to VNF_TERMINATE_PHASE
+        self.set_state(NetworkServiceRecordState.VNF_TERMINATE_PHASE)
+        event_descr = "Terminating VNFS in NS Id:%s" % self.id
+        self.record_event("terminating-vnfs", event_descr)
+        yield from self.terminate_vnfrs(self.vnfrs.values())
+
+        # Move the state to VL_TERMINATE_PHASE
+        self.set_state(NetworkServiceRecordState.VL_TERMINATE_PHASE)
+        event_descr = "Terminating VLs in NS Id:%s" % self.id
+        self.record_event("terminating-vls", event_descr)
+        yield from terminate_vlrs()
+
+        # Let the plugin release any remaining NS-level resources.
+        yield from self.nsm_plugin.terminate_ns(self)
+
+        # Move the state to TERMINATED
+        self.set_state(NetworkServiceRecordState.TERMINATED)
+        event_descr = "Terminated NS Id:%s" % self.id
+        self.record_event("terminated", event_descr)
+
+    def enable(self):
+        """Enable a NetworkServiceRecord. Currently a no-op placeholder."""
+        pass
+
+    def disable(self):
+        """Disable a NetworkServiceRecord. Currently a no-op placeholder."""
+        pass
+
+    def map_config_status(self):
+        self._log.debug("Config status for ns {} is {}".
+                        format(self.name, self._config_status))
+        if self._config_status == NsrYang.ConfigStates.CONFIGURING:
+            return 'configuring'
+        if self._config_status == NsrYang.ConfigStates.FAILED:
+            return 'failed'
+        return 'configured'
+
+    def vl_phase_completed(self):
+        """ Are VLs created in this NS?"""
+        return self._vl_phase_completed
+
+    def vnf_phase_completed(self):
+        """ Are VNFs created in this NS?"""
+        return self._vnf_phase_completed
+
+    def create_msg(self):
+        """ The network service record as a message
+
+        Builds the NSR op-data protobuf from the current record state:
+        identity/status fields, service and initial-config primitives, and —
+        once the respective phases have completed — VLR, constituent-VNFR,
+        VNFFGR and scaling-group records.
+        """
+        nsr_dict = {"ns_instance_config_ref": self.id}
+        nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict(nsr_dict)
+        #nsr.cloud_account = self.cloud_account_name
+        nsr.sdn_account = self._sdn_account_name
+        nsr.name_ref = self.name
+        nsr.nsd_ref = self.nsd_id
+        nsr.nsd_name_ref = self.nsd_msg.name
+        nsr.operational_events = self._op_status.msg
+        nsr.operational_status = self._op_status.yang_str()
+        nsr.config_status = self.map_config_status()
+        nsr.config_status_details = self._config_status_details
+        nsr.create_time = self._create_time
+
+        # Copy the primitives from the (merged) NSD into the op-data message.
+        for cfg_prim in self.nsd_msg.service_primitive:
+            cfg_prim = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ServicePrimitive.from_dict(
+                    cfg_prim.as_dict())
+            nsr.service_primitive.append(cfg_prim)
+
+        for init_cfg in self.nsd_msg.initial_config_primitive:
+            prim = NsrYang.NsrInitialConfigPrimitive.from_dict(
+                init_cfg.as_dict())
+            nsr.initial_config_primitive.append(prim)
+
+        # VLR details are only meaningful after the VL phase has finished.
+        if self.vl_phase_completed():
+            for vlr in self.vlrs:
+                nsr.vlr.append(vlr.create_nsr_vlr_msg(self.vnfrs.values()))
+
+        # Likewise VNFR/VNFFGR/scaling records require the VNF phase.
+        if self.vnf_phase_completed():
+            for vnfr_id in self.vnfrs:
+                nsr.constituent_vnfr_ref.append(self.vnfrs[vnfr_id].const_vnfr_msg)
+            for vnffgr in self.vnffgrs.values():
+                nsr.vnffgr.append(vnffgr.fetch_vnffgr())
+            for scaling_group in self._scaling_groups.values():
+                nsr.scaling_group_record.append(scaling_group.create_record_msg())
+
+        return nsr
+
+    def all_vnfs_active(self):
+        """ Are all VNFS in this NS active? """
+        for _, vnfr in self.vnfrs.items():
+            if vnfr.active is not True:
+                return False
+        return True
+
+    @asyncio.coroutine
+    def update_state(self):
+        """ Re-evaluate this  NS's state """
+        curr_state = self._op_status.state
+
+        if curr_state == NetworkServiceRecordState.TERMINATED:
+            self._log.debug("NS (%s) in terminated state, not updating state", self.id)
+            return
+
+        new_state = NetworkServiceRecordState.RUNNING
+        self._log.info("Received update_state for nsr: %s, curr-state: %s",
+                       self.id, curr_state)
+
+        # Check all the VNFRs are present
+        for _, vnfr in self.vnfrs.items():
+            if vnfr.state in [VnfRecordState.ACTIVE, VnfRecordState.TERMINATED]:
+                pass
+            elif vnfr.state == VnfRecordState.FAILED:
+                if vnfr._prev_state != vnfr.state:
+                    event_descr = "Instantiation of VNF %s failed" % vnfr.id
+                    event_error_details = vnfr.state_failed_reason
+                    self.record_event("vnf-failed", event_descr, evt_details=event_error_details)
+                    vnfr.set_state(VnfRecordState.FAILED)
+                else:
+                    self._log.info("VNF state did not change, curr=%s, prev=%s",
+                                   vnfr.state, vnfr._prev_state)
+                new_state = NetworkServiceRecordState.FAILED
+                break
+            else:
+                self._log.info("VNF %s in NSR %s is still not active; current state is: %s",
+                               vnfr.id, self.id, vnfr.state)
+                new_state = curr_state
+
+        # If new state is RUNNING; check all VLs
+        if new_state == NetworkServiceRecordState.RUNNING:
+            for vl in self.vlrs:
+
+                if vl.state in [VlRecordState.ACTIVE, VlRecordState.TERMINATED]:
+                    pass
+                elif vl.state == VlRecordState.FAILED:
+                    if vl.prev_state != vl.state:
+                        event_descr = "Instantiation of VL %s failed" % vl.id
+                        event_error_details = vl.state_failed_reason
+                        self.record_event("vl-failed", event_descr, evt_details=event_error_details)
+                        vl.prev_state = vl.state
+                    else:
+                        self._log.debug("VL %s already in failed state")
+                else:
+                    if vl.state in [VlRecordState.INSTANTIATION_PENDING, VlRecordState.INIT]:
+                        new_state = NetworkServiceRecordState.VL_INSTANTIATE
+                        break
+
+                    if vl.state in [VlRecordState.TERMINATE_PENDING]:
+                        new_state = NetworkServiceRecordState.VL_TERMINATE
+                        break
+
+        # If new state is RUNNING; check VNFFGRs are also active
+        if new_state == NetworkServiceRecordState.RUNNING:
+            for _, vnffgr in self.vnffgrs.items():
+                self._log.info("Checking vnffgr state for nsr %s is: %s",
+                               self.id, vnffgr.state)
+                if vnffgr.state == VnffgRecordState.ACTIVE:
+                    pass
+                elif vnffgr.state == VnffgRecordState.FAILED:
+                    event_descr = "Instantiation of VNFFGR %s failed" % vnffgr.id
+                    self.record_event("vnffg-failed", event_descr)
+                    new_state = NetworkServiceRecordState.FAILED
+                    break
+                else:
+                    self._log.info("VNFFGR %s in NSR %s is still not active; current state is: %s",
+                                    vnffgr.id, self.id, vnffgr.state)
+                    new_state = curr_state
+
+        # Update all the scaling group instance operational status to
+        # reflect the state of all VNFR within that instance
+        yield from self._update_scale_group_instances_status()
+
+        for _, group in self._scaling_groups.items():
+            if group.state == scale_group.ScaleGroupState.SCALING_OUT:
+                new_state = NetworkServiceRecordState.SCALING_OUT
+                break
+            elif group.state == scale_group.ScaleGroupState.SCALING_IN:
+                new_state = NetworkServiceRecordState.SCALING_IN
+                break
+
+        if new_state != curr_state:
+            self._log.debug("Changing state of Network service %s from %s to %s",
+                            self.id, curr_state, new_state)
+            if new_state == NetworkServiceRecordState.RUNNING:
+                yield from self.is_active()
+            elif new_state == NetworkServiceRecordState.FAILED:
+                # If the NS is already active and we entered scaling_in, scaling_out,
+                # do not mark the NS as failing if scaling operation failed.
+                if curr_state in [NetworkServiceRecordState.SCALING_OUT,
+                                  NetworkServiceRecordState.SCALING_IN] and self._is_active:
+                    new_state = NetworkServiceRecordState.RUNNING
+                    self.set_state(new_state)
+                else:
+                    yield from self.instantiation_failed()
+            else:
+                self.set_state(new_state)
+
+        yield from self.publish()
+
+
+class InputParameterSubstitution(object):
+    """
+    This class is responsible for substituting input parameters into an NSD.
+    """
+
+    def __init__(self, log):
+        """Create an instance of InputParameterSubstitution
+
+        Arguments:
+            log - a logger for this object to use
+
+        """
+        self.log = log
+
+    def __call__(self, nsd, nsr_config):
+        """Substitutes input parameters from the NSR config into the NSD
+
+        This call modifies the provided NSD with the input parameters that are
+        contained in the NSR config.
+
+        Arguments:
+            nsd        - a GI NSD object
+            nsr_config - a GI NSR config object
+
+        """
+        if nsd is None or nsr_config is None:
+            return
+
+        # Create a lookup of the xpath elements that this descriptor allows
+        # to be modified
+        optional_input_parameters = set()
+        for input_parameter in nsd.input_parameter_xpath:
+            optional_input_parameters.add(input_parameter.xpath)
+
+        # Apply the input parameters to the descriptor
+        if nsr_config.input_parameter:
+            for param in nsr_config.input_parameter:
+                if param.xpath not in optional_input_parameters:
+                    msg = "tried to set an invalid input parameter ({})"
+                    self.log.error(msg.format(param.xpath))
+                    continue
+
+                self.log.debug(
+                        "input-parameter:{} = {}".format(
+                            param.xpath,
+                            param.value,
+                            )
+                        )
+
+                try:
+                    xpath.setxattr(nsd, param.xpath, param.value)
+
+                except Exception as e:
+                    self.log.exception(e)
+
+
+class NetworkServiceDescriptor(object):
+    """
+    Network service descriptor class
+    """
+
+    def __init__(self, dts, log, loop, nsd, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self._nsd = nsd
+        self._ref_count = 0
+
+        self._nsm = nsm
+
+    @property
+    def id(self):
+        """ Returns nsd id """
+        return self._nsd.id
+
+    @property
+    def name(self):
+        """ Returns name of nsd """
+        return self._nsd.name
+
+    @property
+    def ref_count(self):
+        """ Returns reference count"""
+        return self._ref_count
+
+    def in_use(self):
+        """ Returns whether nsd is in use or not """
+        return True if self.ref_count > 0 else False
+
+    def ref(self):
+        """ Take a reference on this object """
+        self._ref_count += 1
+
+    def unref(self):
+        """ Release reference on this object """
+        if self.ref_count < 1:
+            msg = ("Unref on a NSD object - nsd id %s, ref_count = %s" %
+                   (self.id, self.ref_count))
+            self._log.critical(msg)
+            raise NetworkServiceDescriptorError(msg)
+        self._ref_count -= 1
+
+    @property
+    def msg(self):
+        """ Return the message associated with this NetworkServiceDescriptor"""
+        return self._nsd
+
+    @staticmethod
+    def path_for_id(nsd_id):
+        """ Return path for the passed nsd_id"""
+        return "C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}'".format(nsd_id)
+
+    def path(self):
+        """ Return the message associated with this NetworkServiceDescriptor"""
+        return NetworkServiceDescriptor.path_for_id(self.id)
+
+    def update(self, nsd):
+        """ Update the NSD descriptor """
+        self._nsd = nsd
+
+
+class NsdDtsHandler(object):
+    """ The network service descriptor DTS handler """
+    XPATH = "C,/nsd:nsd-catalog/nsd:nsd"
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle """
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for Nsd create/update/delete/read requests from dts """
+
+        def on_apply(dts, acg, xact, action, scratch):
+            """Apply the  configuration"""
+            is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
+            self._log.debug("Got nsd apply cfg (xact:%s) (action:%s)",
+                            xact, action)
+            # Create/Update an NSD record
+            for cfg in self._regh.get_xact_elements(xact):
+                # Only interested in those NSD cfgs whose ID was received in prepare callback
+                if cfg.id in scratch.get('nsds', []) or is_recovery:
+                    self._nsm.update_nsd(cfg)
+
+            scratch.pop('nsds', None)
+
+            return RwTypes.RwStatus.SUCCESS
+
+        @asyncio.coroutine
+        def delete_nsd_libs(nsd_id):
+            """ Remove any files uploaded with NSD and stored under $RIFT_ARTIFACTS/libs/<id> """
+            try:
+                rift_artifacts_dir = os.environ['RIFT_ARTIFACTS']
+                nsd_dir = os.path.join(rift_artifacts_dir, 'launchpad/libs', nsd_id)
+
+                if os.path.exists (nsd_dir):
+                    shutil.rmtree(nsd_dir, ignore_errors=True)
+            except Exception as e:
+                self._log.error("Exception in cleaning up NSD libs {}: {}".
+                                format(nsd_id, e))
+                self._log.excpetion(e)
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare callback from DTS for NSD config """
+
+            self._log.info("Got nsd prepare - config received nsd id %s, msg %s",
+                           msg.id, msg)
+
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            if fref.is_field_deleted():
+                # Delete an NSD record
+                self._log.debug("Deleting NSD with id %s", msg.id)
+                if self._nsm.nsd_in_use(msg.id):
+                    self._log.debug("Cannot delete NSD in use - %s", msg.id)
+                    err = "Cannot delete an NSD in use - %s" % msg.id
+                    raise NetworkServiceDescriptorRefCountExists(err)
+
+                yield from delete_nsd_libs(msg.id)
+                self._nsm.delete_nsd(msg.id)
+            else:
+                # Add this NSD to scratch to create/update in apply callback
+                nsds = scratch.setdefault('nsds', [])
+                nsds.append(msg.id)
+                # acg._scratch['nsds'].append(msg.id)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        self._log.debug(
+            "Registering for NSD config using xpath: %s",
+            NsdDtsHandler.XPATH,
+            )
+
+        acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
+        with self._dts.appconf_group_create(handler=acg_hdl) as acg:
+            # Need a list in scratch to store NSDs to create/update later
+            # acg._scratch['nsds'] = list()
+            self._regh = acg.register(
+                xpath=NsdDtsHandler.XPATH,
+                flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+                on_prepare=on_prepare)
+
+
+class VnfdDtsHandler(object):
+    """ DTS handler for VNFD config changes
+
+    Subscribes to VNFD config. Deletes are noted in the prepare callback and
+    executed in apply; creates/updates are likewise deferred to apply via the
+    per-transaction scratch lists.
+    """
+    XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ DTS registration handle """
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for VNFD configuration"""
+
+        @asyncio.coroutine
+        def on_apply(dts, acg, xact, action, scratch):
+            """Apply the  configuration"""
+            self._log.debug("Got NSM VNFD apply (xact: %s) (action: %s)(scr: %s)",
+                            xact, action, scratch)
+
+            # Create/Update a VNFD record
+            for cfg in self._regh.get_xact_elements(xact):
+                # Only interested in those VNFD cfgs whose ID was received in prepare callback
+                if cfg.id in scratch.get('vnfds', []):
+                    self._nsm.update_vnfd(cfg)
+
+            # Process deletions collected by the prepare callback.
+            for cfg in self._regh.elements:
+                if cfg.id in scratch.get('deleted_vnfds', []):
+                    yield from self._nsm.delete_vnfd(cfg.id)
+
+            # Clear the scratch lists so the next transaction starts clean.
+            scratch.pop('vnfds', None)
+            scratch.pop('deleted_vnfds', None)
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ on prepare callback """
+            self._log.debug("Got on prepare for VNFD (path: %s) (action: %s) (msg: %s)",
+                            ks_path.to_xpath(RwNsmYang.get_schema()), xact_info.query_action, msg)
+
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            # Handle deletes in prepare_callback, but adds/updates in apply_callback
+            if fref.is_field_deleted():
+                self._log.debug("Adding msg to deleted field")
+                deleted_vnfds = scratch.setdefault('deleted_vnfds', [])
+                deleted_vnfds.append(msg.id)
+            else:
+                # Add this VNFD to scratch to create/update in apply callback
+                vnfds = scratch.setdefault('vnfds', [])
+                vnfds.append(msg.id)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        self._log.debug(
+            "Registering for VNFD config using xpath: %s",
+            VnfdDtsHandler.XPATH,
+            )
+        acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
+        with self._dts.appconf_group_create(handler=acg_hdl) as acg:
+            # Need a list in scratch to store VNFDs to create/update later
+            # acg._scratch['vnfds'] = list()
+            # acg._scratch['deleted_vnfds'] = list()
+            self._regh = acg.register(
+                xpath=VnfdDtsHandler.XPATH,
+                flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+                on_prepare=on_prepare)
+
class NsrRpcDtsHandler(object):
    """ DTS handler for the nsr:start-network-service RPC.

    Translates the RPC input into an ns-instance-config entry and pushes it
    to the launchpad over a (lazily established) netconf session.
    """
    EXEC_NSR_CONF_XPATH = "I,/nsr:start-network-service"
    EXEC_NSR_CONF_O_XPATH = "O,/nsr:start-network-service"

    # Launchpad netconf endpoint and credentials.
    NETCONF_IP_ADDRESS = "127.0.0.1"
    NETCONF_PORT = 2022
    NETCONF_USER = "admin"
    NETCONF_PW = "admin"

    def __init__(self, dts, log, loop, nsm):
        """
        Args:
            dts: DTS api handle
            log: logger
            loop: asyncio event loop
            nsm: NS manager used to resolve NSD references
        """
        self._dts = dts
        self._log = log
        self._loop = loop
        self._nsm = nsm
        self._nsd = None

        self._ns_regh = None

        # Netconf session to the launchpad; created on first RPC (see _connect).
        self._manager = None

        # Yang model used to serialize ns-instance-config records to XML.
        self._model = RwYang.Model.create_libncx()
        self._model.load_schema_ypbc(RwNsrYang.get_schema())

    @property
    def nsm(self):
        """ Return the NS manager instance """
        return self._nsm

    @staticmethod
    def wrap_netconf_config_xml(xml):
        """ Wrap an XML snippet in a netconf <config> element. """
        xml = '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">{}</config>'.format(xml)
        return xml

    @asyncio.coroutine
    def _connect(self, timeout_secs=240):
        """ Establish a netconf session to the launchpad.

        Retries every 5 seconds until connected or timeout_secs elapses.

        Returns:
            The connected ncclient manager.

        Raises:
            NsrInstantiationFailed: if no connection could be made in time.
        """
        start_time = time.time()
        while (time.time() - start_time) < timeout_secs:

            try:
                self._log.debug("Attemping NsmTasklet netconf connection.")

                manager = yield from ncclient.asyncio_manager.asyncio_connect(
                        loop=self._loop,
                        host=NsrRpcDtsHandler.NETCONF_IP_ADDRESS,
                        port=NsrRpcDtsHandler.NETCONF_PORT,
                        username=NsrRpcDtsHandler.NETCONF_USER,
                        password=NsrRpcDtsHandler.NETCONF_PW,
                        allow_agent=False,
                        look_for_keys=False,
                        hostkey_verify=False,
                        )

                return manager

            except ncclient.transport.errors.SSHError as e:
                self._log.warning("Netconf connection to launchpad %s failed: %s",
                                  NsrRpcDtsHandler.NETCONF_IP_ADDRESS, str(e))

            yield from asyncio.sleep(5, loop=self._loop)

        raise NsrInstantiationFailed("Failed to connect to Launchpad within %s seconds" %
                                     timeout_secs)

    @asyncio.coroutine
    def register(self):
        """ Register as the publisher of the start-network-service RPC. """
        @asyncio.coroutine
        def on_ns_config_prepare(xact_info, action, ks_path, msg):
            """ prepare callback from dts start-network-service"""
            assert action == rwdts.QueryAction.RPC
            rpc_ip = msg
            rpc_op = NsrYang.YangOutput_Nsr_StartNetworkService.from_dict({
                    "nsr_id": str(uuid.uuid4())
                })

            # Validate mandatory RPC parameters up front; NACK and bail out
            # instead of falling through with incomplete input (previously
            # this only logged the error and continued).
            if not ('name' in rpc_ip and 'nsd_ref' in rpc_ip and 'cloud_account' in rpc_ip):
                self._log.error("Mandatory parameters name or nsd_ref or cloud account not found in start-network-service {}".format(rpc_ip))
                xact_info.respond_xpath(rwdts.XactRspCode.NACK,
                                        NsrRpcDtsHandler.EXEC_NSR_CONF_O_XPATH)
                return

            self._log.debug("start-network-service RPC input: {}".format(rpc_ip))

            try:
                self._log.debug("RPC output: {}".format(rpc_op))
                nsd_copy = self.nsm.get_nsd(rpc_ip.nsd_ref)

                # Connect to the launchpad lazily, on first RPC.
                if not self._manager:
                    self._manager = yield from self._connect()

                self._log.debug("Configuring ns-instance-config with name  %s nsd-ref: %s",
                                rpc_ip.name, rpc_ip.nsd_ref)

                # Build the ns-instance-config record: a fresh id plus every
                # RPC input field that is also a field of the config record.
                ns_instance_config_dict = {"id": rpc_op.nsr_id, "admin_status": "ENABLED"}
                ns_instance_config_copy_dict = {k: v for k, v in rpc_ip.as_dict().items()
                                                if k in RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr().fields}
                ns_instance_config_dict.update(ns_instance_config_copy_dict)

                ns_instance_config = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr.from_dict(ns_instance_config_dict)
                ns_instance_config.nsd = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_Nsd()
                ns_instance_config.nsd.from_dict(nsd_copy.msg.as_dict())

                xml = ns_instance_config.to_xml_v2(self._model)
                netconf_xml = self.wrap_netconf_config_xml(xml)

                # Fixed: log arguments were swapped (xml was logged as the
                # destination and the IP as the payload).
                self._log.debug("Sending configure ns-instance-config xml to %s: %s",
                                NsrRpcDtsHandler.NETCONF_IP_ADDRESS, netconf_xml)

                response = yield from self._manager.edit_config(
                           target="running",
                           config=netconf_xml,
                           )
                self._log.debug("Received edit config response: %s", str(response))

                xact_info.respond_xpath(rwdts.XactRspCode.ACK,
                                        NsrRpcDtsHandler.EXEC_NSR_CONF_O_XPATH,
                                        rpc_op)
            except Exception as e:
                # Any failure (NSD lookup, netconf connect, edit-config) NACKs the RPC.
                self._log.error("Exception processing the "
                                "start-network-service: {}".format(e))
                self._log.exception(e)
                xact_info.respond_xpath(rwdts.XactRspCode.NACK,
                                        NsrRpcDtsHandler.EXEC_NSR_CONF_O_XPATH)

        hdl_ns = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_ns_config_prepare,)

        with self._dts.group_create() as group:
            self._ns_regh = group.register(xpath=NsrRpcDtsHandler.EXEC_NSR_CONF_XPATH,
                                           handler=hdl_ns,
                                           flags=rwdts.Flag.PUBLISHER,
                                           )
+
+
class NsrDtsHandler(object):
    """ DTS handler for NSR configuration (ns-instance-config).

    Subscribes to the NSR config xpath and the nested scaling-group instance
    xpath, and maps config transactions onto NS manager operations:
    instantiate/terminate NSRs, add/remove VLs and scale instances in/out.
    """
    # Config xpath of the NSR records themselves.
    NSR_XPATH = "C,/nsr:ns-instance-config/nsr:nsr"
    # Config xpath of scaling-group instances nested under an NSR.
    SCALE_INSTANCE_XPATH = "C,/nsr:ns-instance-config/nsr:nsr/nsr:scaling-group/nsr:instance"

    def __init__(self, dts, log, loop, nsm):
        # dts: DTS api handle; log: logger; loop: asyncio event loop;
        # nsm: the owning NS manager.
        self._dts = dts
        self._log = log
        self._loop = loop
        self._nsm = nsm

        # Registration handles for the two xpaths; populated in register().
        self._nsr_regh = None
        self._scale_regh = None

    @property
    def nsm(self):
        """ Return the NS manager instance """
        return self._nsm

    @asyncio.coroutine
    def register(self):
        """ Register for Nsr create/update/delete/read requests from dts """

        def nsr_id_from_keyspec(ks):
            # Extract the NSR id (key00) from a keyspec; the NSR id is the
            # first key for both the NSR and scale-instance paths.
            nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
            nsr_id = nsr_path_entry.key00.id
            return nsr_id

        def group_name_from_keyspec(ks):
            # Extract the scaling-group-name-ref key from a scale-instance keyspec.
            group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
            group_name = group_path_entry.key00.scaling_group_name_ref
            return group_name

        def is_instance_in_reg_elements(nsr_id, group_name, instance_id):
            """ Return boolean indicating if scaling group instance was already committed previously.

            By looking at the existing elements in this registration handle (elements not part
            of this current xact), we can tell if the instance was configured previously without
            keeping any application state.
            """
            for instance_cfg, keyspec in self._nsr_regh.get_xact_elements(include_keyspec=True):
                elem_nsr_id = nsr_id_from_keyspec(keyspec)
                elem_group_name = group_name_from_keyspec(keyspec)

                if elem_nsr_id != nsr_id or group_name != elem_group_name:
                    continue

                if instance_cfg.id == instance_id:
                    return True

            return False

        def get_scale_group_instance_delta(nsr_id, group_name, xact):
            # Diff the in-xact scale-instance elements against the committed
            # ones for this (nsr, group) to derive added/deleted instance ids.
            delta = {"added": [], "deleted": []}
            for instance_cfg, keyspec in self._scale_regh.get_xact_elements(xact, include_keyspec=True):
                elem_nsr_id = nsr_id_from_keyspec(keyspec)
                if elem_nsr_id != nsr_id:
                    continue

                elem_group_name = group_name_from_keyspec(keyspec)
                if elem_group_name != group_name:
                    continue

                delta["added"].append(instance_cfg.id)

            # Anything already committed is either unchanged (drop it from
            # "added") or absent from the xact view (a delete).
            for instance_cfg, keyspec in self._scale_regh.get_xact_elements(include_keyspec=True):
                elem_nsr_id = nsr_id_from_keyspec(keyspec)
                if elem_nsr_id != nsr_id:
                    continue

                elem_group_name = group_name_from_keyspec(keyspec)
                if elem_group_name != group_name:
                    continue

                if instance_cfg.id in delta["added"]:
                    delta["added"].remove(instance_cfg.id)
                else:
                    delta["deleted"].append(instance_cfg.id)

            return delta

        @asyncio.coroutine
        def update_nsr_nsd(nsr_id, xact, scratch):
            # Apply VLD changes from an updated NSD inside an NSR config:
            # instantiate added VLs, terminate deleted ones.

            @asyncio.coroutine
            def get_nsr_vl_delta(nsr_id, xact, scratch):
                # Same two-pass diff as get_scale_group_instance_delta, but
                # over the VLD entries of this NSR's embedded NSD.
                # NOTE(review): declared a coroutine but contains no yield.
                delta = {"added": [], "deleted": []}
                for instance_cfg, keyspec in self._nsr_regh.get_xact_elements(xact, include_keyspec=True):
                    elem_nsr_id = nsr_id_from_keyspec(keyspec)
                    if elem_nsr_id != nsr_id:
                        continue

                    if 'vld' in instance_cfg.nsd:
                        for vld in instance_cfg.nsd.vld:
                            delta["added"].append(vld)

                for instance_cfg, keyspec in self._nsr_regh.get_xact_elements(include_keyspec=True):
                    self._log.debug("NSR update: %s", instance_cfg)
                    elem_nsr_id = nsr_id_from_keyspec(keyspec)
                    if elem_nsr_id != nsr_id:
                        continue

                    if 'vld' in instance_cfg.nsd:
                        for vld in instance_cfg.nsd.vld:
                            if vld in delta["added"]:
                                delta["added"].remove(vld)
                            else:
                                delta["deleted"].append(vld)

                return delta

            vl_delta = yield from get_nsr_vl_delta(nsr_id, xact, scratch)
            self._log.debug("Got NSR:%s VL instance delta: %s", nsr_id, vl_delta)

            for vld in vl_delta["added"]:
                yield from self._nsm.nsr_instantiate_vl(nsr_id, vld)

            for vld in vl_delta["deleted"]:
                yield from self._nsm.nsr_terminate_vl(nsr_id, vld)

        def get_add_delete_update_cfgs(dts_member_reg, xact, key_name, scratch):
            # Unfortunately, it is currently difficult to figure out what has exactly
            # changed in this xact without Pbdelta support (RIFT-4916)
            # As a workaround, we can fetch the pre and post xact elements and
            # perform a comparison to figure out adds/deletes/updates
            xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
            curr_cfgs = list(dts_member_reg.elements)

            xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
            curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}

            # Find Adds
            added_keys = set(xact_key_map) - set(curr_key_map)
            added_cfgs = [xact_key_map[key] for key in added_keys]

            # Find Deletes
            deleted_keys = set(curr_key_map) - set(xact_key_map)
            deleted_cfgs = [curr_key_map[key] for key in deleted_keys]

            # Find Updates
            updated_keys = set(curr_key_map) & set(xact_key_map)
            updated_cfgs = [xact_key_map[key] for key in updated_keys
                            if xact_key_map[key] != curr_key_map[key]]

            return added_cfgs, deleted_cfgs, updated_cfgs

        def on_apply(dts, acg, xact, action, scratch):
            """Apply the NSR configuration: create/terminate/update NSRs and
            trigger VL and scaling-group changes derived from the xact."""
            self._log.debug("Got nsr apply (xact: %s) (action: %s)(scr: %s)",
                            xact, action, scratch)

            def handle_create_nsr(msg, restart_mode=False):
                # Handle create nsr requests """
                # Do some validations
                if not msg.has_field("nsd"):
                    err = "NSD not provided"
                    self._log.error(err)
                    raise NetworkServiceRecordError(err)

                self._log.debug("Creating NetworkServiceRecord %s  from nsr config  %s",
                               msg.id, msg.as_dict())
                nsr = self.nsm.create_nsr(msg, restart_mode=restart_mode)
                return nsr

            def handle_delete_nsr(msg):
                @asyncio.coroutine
                def delete_instantiation(ns_id):
                    """ Delete instantiation """
                    with self._dts.transaction() as xact:
                        yield from self._nsm.terminate_ns(ns_id, xact)

                # Handle delete NSR requests
                self._log.info("Delete req for  NSR Id: %s received", msg.id)
                # Terminate the NSR instance
                nsr = self._nsm.get_ns_by_nsr_id(msg.id)

                nsr.set_state(NetworkServiceRecordState.TERMINATE_RCVD)
                event_descr = "Terminate rcvd for NS Id:%s" % msg.id
                nsr.record_event("terminate-rcvd", event_descr)

                self._loop.create_task(delete_instantiation(msg.id))

            @asyncio.coroutine
            def begin_instantiation(nsr):
                # Begin instantiation
                self._log.info("Beginning NS instantiation: %s", nsr.id)
                yield from self._nsm.instantiate_ns(nsr.id, xact)

            # NOTE(review): duplicate of the debug log at the top of on_apply.
            self._log.debug("Got nsr apply (xact: %s) (action: %s)(scr: %s)",
                            xact, action, scratch)

            # INSTALL with no xact id: recovery/restart path — re-create and
            # re-instantiate every NSR already present in the cached elements.
            if action == rwdts.AppconfAction.INSTALL and xact.id is None:
                for element in self._nsr_regh.elements:
                    nsr = handle_create_nsr(element, restart_mode=True)
                    self._loop.create_task(begin_instantiation(nsr))


            (added_msgs, deleted_msgs, updated_msgs) = get_add_delete_update_cfgs(self._nsr_regh,
                                                                                  xact,
                                                                                  "id",
                                                                                  scratch)
            self._log.debug("Added: %s, Deleted: %s, Updated: %s", added_msgs,
                            deleted_msgs, updated_msgs)

            for msg in added_msgs:
                if msg.id not in self._nsm.nsrs:
                    self._log.info("Create NSR received in on_apply to instantiate NS:%s", msg.id)
                    nsr = handle_create_nsr(msg)
                    self._loop.create_task(begin_instantiation(nsr))

            for msg in deleted_msgs:
                self._log.info("Delete NSR received in on_apply to terminate NS:%s", msg.id)
                try:
                    handle_delete_nsr(msg)
                except Exception:
                    self._log.exception("Failed to terminate NS:%s", msg.id)

            for msg in updated_msgs:
                self._log.info("Update NSR received in on_apply: %s", msg)

                self._nsm.nsr_update_cfg(msg.id, msg)

                # An embedded NSD in the update means VLDs may have changed.
                if 'nsd' in msg:
                    self._loop.create_task(update_nsr_nsd(msg.id, xact, scratch))

                for group in msg.scaling_group:
                    instance_delta = get_scale_group_instance_delta(msg.id, group.scaling_group_name_ref, xact)
                    self._log.debug("Got NSR:%s scale group instance delta: %s", msg.id, instance_delta)

                    for instance_id in instance_delta["added"]:
                        self._nsm.scale_nsr_out(msg.id, group.scaling_group_name_ref, instance_id, xact)

                    for instance_id in instance_delta["deleted"]:
                        self._nsm.scale_nsr_in(msg.id, group.scaling_group_name_ref, instance_id)


            return RwTypes.RwStatus.SUCCESS

        @asyncio.coroutine
        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
            """ Prepare callback from DTS for NSR: validate the request
            before it is committed; actual work happens in on_apply. """

            xpath = ks_path.to_xpath(RwNsrYang.get_schema())
            action = xact_info.query_action
            self._log.debug(
                    "Got Nsr prepare callback (xact: %s) (action: %s) (info: %s), %s:%s)",
                    xact, action, xact_info, xpath, msg
                    )

            # NOTE(review): delete_instantiation/handle_delete_nsr below are
            # defined here but not invoked in this callback — deletes are
            # handled in on_apply. Presumably leftover; confirm before removal.
            @asyncio.coroutine
            def delete_instantiation(ns_id):
                """ Delete instantiation """
                yield from self._nsm.terminate_ns(ns_id, None)

            def handle_delete_nsr():
                """ Handle delete NSR requests """
                self._log.info("Delete req for  NSR Id: %s received", msg.id)
                # Terminate the NSR instance
                nsr = self._nsm.get_ns_by_nsr_id(msg.id)

                nsr.set_state(NetworkServiceRecordState.TERMINATE_RCVD)
                event_descr = "Terminate rcvd for NS Id:%s" % msg.id
                nsr.record_event("terminate-rcvd", event_descr)

                self._loop.create_task(delete_instantiation(msg.id))

            fref = ProtobufC.FieldReference.alloc()
            fref.goto_whole_message(msg.to_pbcm())

            if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE, rwdts.QueryAction.DELETE]:
                # if this is an NSR create
                if action != rwdts.QueryAction.DELETE and msg.id not in self._nsm.nsrs:
                    # Ensure the Cloud account/datacenter has been specified
                    if not msg.has_field("cloud_account") and not msg.has_field("om_datacenter"):
                        raise NsrInstantiationFailed("Cloud account or datacenter not specified in NSR")

                    # Check if nsd is specified
                    if not msg.has_field("nsd"):
                        raise NsrInstantiationFailed("NSD not specified in NSR")

                else:
                    # Update or delete of an existing NSR: validate that VL
                    # updates and scaling requests are legal in this state.
                    nsr = self._nsm.nsrs[msg.id]

                    if msg.has_field("nsd"):
                        if nsr.state != NetworkServiceRecordState.RUNNING:
                            raise NsrVlUpdateError("Unable to update VL when NSR not in running state")
                        if 'vld' not in msg.nsd or len(msg.nsd.vld) == 0:
                            raise NsrVlUpdateError("NS config NSD should have atleast 1 VLD defined")

                    if msg.has_field("scaling_group"):
                        if nsr.state != NetworkServiceRecordState.RUNNING:
                            raise ScalingOperationError("Unable to perform scaling action when NS is not in running state")

                        if len(msg.scaling_group) > 1:
                            raise ScalingOperationError("Only a single scaling group can be configured at a time")

                        for group_msg in msg.scaling_group:
                            num_new_group_instances = len(group_msg.instance)
                            if num_new_group_instances > 1:
                                raise ScalingOperationError("Only a single scaling instance can be modified at a time")

                            elif num_new_group_instances == 1:
                                scale_group = nsr.scaling_groups[group_msg.scaling_group_name_ref]
                                if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
                                    if len(scale_group.instances) == scale_group.max_instance_count:
                                        raise ScalingOperationError("Max instances for %s reached" % scale_group)

            acg.handle.prepare_complete_ok(xact_info.handle)


        self._log.debug("Registering for NSR config using xpath: %s",
                        NsrDtsHandler.NSR_XPATH)

        acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
        with self._dts.appconf_group_create(handler=acg_hdl) as acg:
            self._nsr_regh = acg.register(xpath=NsrDtsHandler.NSR_XPATH,
                                      flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
                                      on_prepare=on_prepare)

            self._scale_regh = acg.register(
                                      xpath=NsrDtsHandler.SCALE_INSTANCE_XPATH,
                                      flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY| rwdts.Flag.CACHE,
                                      )
+
+
class NsrOpDataDtsHandler(object):
    """ Publisher of NSR operational data (ns-instance-opdata/nsr).

    Other components use create/update/delete below to push NSR opdata
    elements through the registration handle.
    """
    XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"

    def __init__(self, dts, log, loop, nsm):
        # dts: DTS api handle; log: logger; loop: asyncio event loop;
        # nsm: the owning NS manager.
        self._dts = dts
        self._log = log
        self._loop = loop
        self._nsm = nsm
        self._regh = None

    @property
    def regh(self):
        """ Return the registration handle"""
        return self._regh

    @property
    def nsm(self):
        """ Return the NS manager instance """
        return self._nsm

    @asyncio.coroutine
    def register(self):
        """ Register as publisher of the NSR op data path. """
        self._log.debug("Registering Nsr op data path %s as publisher",
                        NsrOpDataDtsHandler.XPATH)

        hdl = rift.tasklets.DTS.RegistrationHandler()
        handlers = rift.tasklets.Group.Handler()
        with self._dts.group_create(handler=handlers) as group:
            self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,
                                        handler=hdl,
                                        flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ | rwdts.Flag.DATASTORE)

    @asyncio.coroutine
    def create(self, path, msg):
        """
        Create an NS record in DTS with the path and message
        """
        self._log.debug("Creating NSR %s:%s", path, msg)
        self.regh.create_element(path, msg)
        self._log.debug("Created NSR, %s:%s", path, msg)

    @asyncio.coroutine
    def update(self, path, msg, flags=rwdts.XactFlag.REPLACE):
        """
        Update an NS record in DTS with the path, message and transaction flags
        """
        self._log.debug("Updating NSR, %s:%s regh = %s", path, msg, self.regh)
        self.regh.update_element(path, msg, flags)
        self._log.debug("Updated NSR, %s:%s", path, msg)

    @asyncio.coroutine
    def delete(self, path):
        """
        Delete the NS record in DTS at the given path
        """
        self._log.debug("Deleting NSR path:%s", path)
        self.regh.delete_element(path)
        self._log.debug("Deleted NSR path:%s", path)
+
+
class VnfrDtsHandler(object):
    """ DTS handler for VNFR (vnfr-catalog/vnfr) advises.

    Subscribes to the VNFR xpath and forwards create/update/delete
    notifications to the NS manager.
    """
    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"

    def __init__(self, dts, log, loop, nsm):
        """
        Args:
            dts: DTS api handle
            log: logger
            loop: asyncio event loop
            nsm: NS manager that owns the VNFR records
        """
        self._dts = dts
        self._log = log
        self._loop = loop
        self._nsm = nsm

        self._regh = None

    @property
    def regh(self):
        """ Return registration handle """
        return self._regh

    @property
    def nsm(self):
        """ Return the NS manager instance """
        return self._nsm

    @asyncio.coroutine
    def register(self):
        """ Register for vnfr create/update/delete/ advises from dts """

        def on_commit(xact_info):
            """ The transaction has been committed """
            self._log.debug("Got vnfr commit (xact_info: %s)", xact_info)
            return rwdts.MemberRspCode.ACTION_OK

        @asyncio.coroutine
        def on_prepare(xact_info, action, ks_path, msg):
            """ prepare callback from dts """
            xpath = ks_path.to_xpath(RwNsrYang.get_schema())
            self._log.debug(
                "Got vnfr on_prepare cb (xact_info: %s, action: %s): %s:%s",
                xact_info, action, ks_path, msg
                )

            schema = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
            path_entry = schema.keyspec_to_entry(ks_path)

            # Requests for VNFRs this NSM does not manage are answered NA.
            # (Removed an unreachable, mis-indented debug statement that
            # followed this return.)
            if path_entry.key00.id not in self._nsm._vnfrs:
                self._log.error("%s request for non existent record path %s",
                                action, xpath)
                xact_info.respond_xpath(rwdts.XactRspCode.NA, xpath)
                return

            if action == rwdts.QueryAction.CREATE or action == rwdts.QueryAction.UPDATE:
                yield from self._nsm.update_vnfr(msg)
            elif action == rwdts.QueryAction.DELETE:
                self._log.debug("Deleting VNFR with id %s", path_entry.key00.id)
                self._nsm.delete_vnfr(path_entry.key00.id)

            xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath)

        self._log.debug("Registering for VNFR using xpath: %s",
                        VnfrDtsHandler.XPATH,)

        hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit,
                                                    on_prepare=on_prepare,)
        with self._dts.group_create() as group:
            self._regh = group.register(xpath=VnfrDtsHandler.XPATH,
                                        handler=hdl,
                                        flags=(rwdts.Flag.SUBSCRIBER),)
+
+
class NsdRefCountDtsHandler(object):
    """ Publisher of the per-NSD reference count operational data. """
    XPATH = "D,/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count"

    def __init__(self, dts, log, loop, nsm):
        """
        Args:
            dts: DTS api handle
            log: logger
            loop: asyncio event loop
            nsm: the owning NS manager
        """
        self._dts = dts
        self._log = log
        self._loop = loop
        self._nsm = nsm

        self._regh = None

    @property
    def regh(self):
        """ Return registration handle """
        return self._regh

    @property
    def nsm(self):
        """ Return the NS manager instance """
        return self._nsm

    @asyncio.coroutine
    def register(self):
        """ Register to serve NSD ref count reads over dts """

        @asyncio.coroutine
        def on_prepare(xact_info, action, ks_path, msg):
            """ Serve a READ of the nsd-ref-count path; reject other actions. """
            xpath = ks_path.to_xpath(RwNsrYang.get_schema())

            if action != rwdts.QueryAction.READ:
                raise NetworkServiceRecordError("Not supported operation %s" % action)

            schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount.schema()
            path_entry = schema.keyspec_to_entry(ks_path)
            nsd_list = yield from self._nsm.get_nsd_refcount(path_entry.key00.nsd_id_ref)

            # Stream each ref-count element with MORE, then close with ACK.
            for elem_xpath, elem_msg in nsd_list:
                xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.MORE,
                                        xpath=elem_xpath,
                                        msg=elem_msg)
            xact_info.respond_xpath(rwdts.XactRspCode.ACK)

        handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
        with self._dts.group_create() as group:
            self._regh = group.register(xpath=NsdRefCountDtsHandler.XPATH,
                                        handler=handler,
                                        flags=rwdts.Flag.PUBLISHER)
+
+
class NsManager(object):
    """ The Network Service Manager.

    Owns the NSR/NSD/VNFD/VNFR record maps and the DTS handlers that
    translate configuration and RPCs into record operations.
    """
    def __init__(self, dts, log, loop,
                 nsr_handler, vnfr_handler, vlr_handler, ro_plugin_selector,
                 vnffgmgr, vnfd_pub_handler, cloud_account_handler):
        self._dts = dts
        self._log = log
        self._loop = loop
        self._nsr_handler = nsr_handler
        self._vnfr_pub_handler = vnfr_handler
        self._vlr_pub_handler = vlr_handler
        self._vnffgmgr = vnffgmgr
        self._vnfd_pub_handler = vnfd_pub_handler
        self._cloud_account_handler = cloud_account_handler

        self._ro_plugin_selector = ro_plugin_selector
        # NOTE(review): endpoint/credentials duplicate the
        # NsrRpcDtsHandler.NETCONF_* constants — consider sharing them.
        self._ncclient = rift.mano.ncclient.NcClient(
              host="127.0.0.1",
              port=2022,
              username="admin",
              password="admin",
              loop=self._loop)

        # Record maps, keyed by record id.
        self._nsrs = {}
        self._nsds = {}
        self._vnfds = {}
        self._vnfrs = {}

        self.cfgmgr_obj = conman.ROConfigManager(log, loop, dts, self)

        # TODO: All these handlers should move to tasklet level.
        # Passing self is often an indication of bad design
        self._nsd_dts_handler = NsdDtsHandler(dts, log, loop, self)
        self._vnfd_dts_handler = VnfdDtsHandler(dts, log, loop, self)
        # Handlers registered as a batch in register().
        self._dts_handlers = [self._nsd_dts_handler,
                              VnfrDtsHandler(dts, log, loop, self),
                              NsdRefCountDtsHandler(dts, log, loop, self),
                              NsrDtsHandler(dts, log, loop, self),
                              ScalingRpcHandler(log, dts, loop, self.scale_rpc_callback),
                              NsrRpcDtsHandler(dts,log,loop,self),
                              self._vnfd_dts_handler,
                              self.cfgmgr_obj,
                              ]
+
+
    @property
    def log(self):
        """ Log handle """
        return self._log

    @property
    def loop(self):
        """ Loop """
        return self._loop

    @property
    def dts(self):
        """ DTS handle """
        return self._dts

    @property
    def nsr_handler(self):
        """ NSR handler """
        return self._nsr_handler

    @property
    def so_obj(self):
        """ So Obj handler """
        # NOTE(review): self._so_obj is never assigned in __init__, so
        # reading this property would raise AttributeError — confirm intent.
        return self._so_obj

    @property
    def nsrs(self):
        """ NSRs in this NSM"""
        return self._nsrs

    @property
    def nsds(self):
        """ NSDs in this NSM"""
        return self._nsds

    @property
    def vnfds(self):
        """ VNFDs in this NSM"""
        return self._vnfds

    @property
    def vnfrs(self):
        """ VNFRs in this NSM"""
        return self._vnfrs

    @property
    def nsr_pub_handler(self):
        """ NSR publication handler """
        return self._nsr_handler

    @property
    def vnfr_pub_handler(self):
        """ VNFR publication handler """
        return self._vnfr_pub_handler

    @property
    def vlr_pub_handler(self):
        """ VLR publication handler """
        return self._vlr_pub_handler

    @property
    def vnfd_pub_handler(self):
        """ VNFD publication handler """
        return self._vnfd_pub_handler
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register all static DTS handlers """
+        for dts_handle in self._dts_handlers:
+            yield from dts_handle.register()
+
+
+    def get_ns_by_nsr_id(self, nsr_id):
+        """ get NSR by nsr id """
+        if nsr_id not in self._nsrs:
+            raise NetworkServiceRecordError("NSR id %s not found" % nsr_id)
+
+        return self._nsrs[nsr_id]
+
    def scale_nsr_out(self, nsr_id, scale_group_name, instance_id, config_xact):
        """ Schedule creation of a new scaling-group instance on a running NSR.

        Raises:
            ScalingOperationError: when the NSR is not in RUNNING state.
        """
        self.log.debug("Scale out NetworkServiceRecord (nsr_id: %s) (scaling group: %s) (instance_id: %s)",
                       nsr_id,
                       scale_group_name,
                       instance_id
                       )
        nsr = self._nsrs[nsr_id]
        if nsr.state != NetworkServiceRecordState.RUNNING:
            raise ScalingOperationError("Cannot perform scaling operation if NSR is not in running state")

        # Fire-and-forget: the actual scale-out runs as a separate task.
        self._loop.create_task(nsr.create_scale_group_instance(scale_group_name, instance_id, config_xact))
+
    def scale_nsr_in(self, nsr_id, scale_group_name, instance_id):
        """ Schedule removal of a scaling-group instance from a running NSR.

        Raises:
            ScalingOperationError: when the NSR is not in RUNNING state.
        """
        self.log.debug("Scale in NetworkServiceRecord (nsr_id: %s) (scaling group: %s) (instance_id: %s)",
                       nsr_id,
                       scale_group_name,
                       instance_id,
                       )
        nsr = self._nsrs[nsr_id]
        if nsr.state != NetworkServiceRecordState.RUNNING:
            raise ScalingOperationError("Cannot perform scaling operation if NSR is not in running state")

        # Fire-and-forget: the actual scale-in runs as a separate task.
        self._loop.create_task(nsr.delete_scale_group_instance(scale_group_name, instance_id))
+
    def scale_rpc_callback(self, xact, msg, action):
        """Callback handler for RPC calls
        Args:
            xact : Transaction Handler
            msg : RPC input
            action : Scaling Action (a ScalingRpcHandler.ACTION member)

        Reads the NSR's scaling-group config, adds/removes the requested
        instance, and writes the whole NSR config back through netconf.
        """
        ScalingGroupInstance = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance
        # NOTE(review): ScalingGroup is bound here but never used below.
        ScalingGroup = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup

        # Config xpath of the NSR whose scaling-group list will be edited.
        xpath = ('C,/nsr:ns-instance-config/nsr:nsr[nsr:id="{}"]').format(
                          msg.nsr_id_ref)
        instance = ScalingGroupInstance.from_dict({"id": msg.instance_id})

        @asyncio.coroutine
        def get_nsr_scaling_group():
            # Read the current NSR config; if multiple results arrive, the
            # last one wins (nsr_config is overwritten each iteration).
            results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)

            for result in results:
                res = yield from result
                nsr_config = res.result

            # Find the matching scaling group, creating it when absent
            # (for-else: the else arm runs only if no break occurred).
            for scaling_group in nsr_config.scaling_group:
                if scaling_group.scaling_group_name_ref == msg.scaling_group_name_ref:
                    break
            else:
                scaling_group = nsr_config.scaling_group.add()
                scaling_group.scaling_group_name_ref = msg.scaling_group_name_ref

            return (nsr_config, scaling_group)

        @asyncio.coroutine
        def update_config(nsr_config):
            # Push the modified NSR config back via netconf with
            # replace semantics on the running datastore.
            xml = self._ncclient.convert_to_xml(RwNsrYang, nsr_config)
            xml = '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">{}</config>'.format(xml)
            yield from self._ncclient.connect()
            yield from self._ncclient.manager.edit_config(target="running", config=xml, default_operation="replace")

        @asyncio.coroutine
        def scale_out():
            nsr_config, scaling_group = yield from get_nsr_scaling_group()
            scaling_group.instance.append(instance)
            yield from update_config(nsr_config)

        @asyncio.coroutine
        def scale_in():
            nsr_config, scaling_group = yield from get_nsr_scaling_group()
            scaling_group.instance.remove(instance)
            yield from update_config(nsr_config)

        # The RPC returns immediately; the config edit runs asynchronously.
        if action == ScalingRpcHandler.ACTION.SCALE_OUT:
            self._loop.create_task(scale_out())
        else:
            self._loop.create_task(scale_in())

        # Opdata based calls, disabled for now!
        # if action == ScalingRpcHandler.ACTION.SCALE_OUT:
        #     self.scale_nsr_out(
        #           msg.nsr_id_ref,
        #           msg.scaling_group_name_ref,
        #           msg.instance_id,
        #           xact)
        # else:
        #     self.scale_nsr_in(
        #           msg.nsr_id_ref,
        #           msg.scaling_group_name_ref,
        #           msg.instance_id)
+    
+    def nsr_update_cfg(self, nsr_id, msg):
+        nsr = self._nsrs[nsr_id]
+        nsr.nsr_cfg_msg= msg
+
+    def nsr_instantiate_vl(self, nsr_id, vld):
+        self.log.debug("NSR {} create VL {}".format(nsr_id, vld))
+        nsr = self._nsrs[nsr_id]
+        if nsr.state != NetworkServiceRecordState.RUNNING:
+            raise NsrVlUpdateError("Cannot perform VL instantiate if NSR is not in running state")
+
+        # Not calling in a separate task as this is called from a separate task
+        yield from nsr.create_vl_instance(vld)
+
+    def nsr_terminate_vl(self, nsr_id, vld):
+        self.log.debug("NSR {} delete VL {}".format(nsr_id, vld.id))
+        nsr = self._nsrs[nsr_id]
+        if nsr.state != NetworkServiceRecordState.RUNNING:
+            raise NsrVlUpdateError("Cannot perform VL terminate if NSR is not in running state")
+
+        # Not calling in a separate task as this is called from a separate task
+        yield from nsr.delete_vl_instance(vld)
+
    def create_nsr(self, nsr_msg, restart_mode=False):
        """ Create an NSR instance.

        Args:
            nsr_msg: NSR config message; nsr_msg.id must not already exist.
            restart_mode: forwarded to NetworkServiceRecord (presumably set
                when re-creating records after a tasklet restart — confirm).

        Returns:
            The newly created NetworkServiceRecord.

        Raises:
            NetworkServiceRecordError: on duplicate nsr id.
        """
        if nsr_msg.id in self._nsrs:
            msg = "NSR id %s already exists" % nsr_msg.id
            self._log.error(msg)
            raise NetworkServiceRecordError(msg)

        self._log.info("Create NetworkServiceRecord nsr id %s from nsd_id %s",
                       nsr_msg.id,
                       nsr_msg.nsd.id)

        # The RO plugin and SDN account come from the currently configured
        # cloud account.
        nsm_plugin = self._ro_plugin_selector.ro_plugin
        sdn_account_name = self._cloud_account_handler.get_cloud_account_sdn_name(nsr_msg.cloud_account)

        nsr = NetworkServiceRecord(self._dts,
                                   self._log,
                                   self._loop,
                                   self,
                                   nsm_plugin,
                                   nsr_msg,
                                   sdn_account_name,
                                   restart_mode=restart_mode
                                   )
        # Cache first, then let the RO plugin create its own record.
        self._nsrs[nsr_msg.id] = nsr
        nsm_plugin.create_nsr(nsr_msg, nsr_msg.nsd)

        return nsr
+
+    def delete_nsr(self, nsr_id):
+        """
+        Delete NSR with the passed nsr id
+        """
+        del self._nsrs[nsr_id]
+
    @asyncio.coroutine
    def instantiate_ns(self, nsr_id, config_xact):
        """ Instantiate an NS instance.

        Delegates the actual instantiation to the NSR's RO plugin.

        Raises:
            NetworkServiceRecordError: when nsr_id is unknown.
        """
        self._log.debug("Instantiating Network service id %s", nsr_id)
        if nsr_id not in self._nsrs:
            err = "NSR id %s not found " % nsr_id
            self._log.error(err)
            raise NetworkServiceRecordError(err)

        nsr = self._nsrs[nsr_id]
        yield from nsr.nsm_plugin.instantiate_ns(nsr, config_xact)
+
    @asyncio.coroutine
    def update_vnfr(self, vnfr):
        """Create/Update an VNFR.

        Propagates the new VNFR state to the cached record, then refreshes
        the state of the NSR that owns the VNFR.
        """

        vnfr_state = self._vnfrs[vnfr.id].state
        self._log.debug("Updating VNFR with state %s: vnfr %s", vnfr_state, vnfr)

        yield from self._vnfrs[vnfr.id].update_state(vnfr)
        # NOTE(review): find_nsr_for_vnfr may return None; update_state()
        # would then raise AttributeError — confirm callers guarantee that
        # every cached VNFR belongs to some NSR.
        nsr = self.find_nsr_for_vnfr(vnfr.id)
        yield from nsr.update_state()
+
+    def find_nsr_for_vnfr(self, vnfr_id):
+        """ Find the NSR which )has the passed vnfr id"""
+        for nsr in list(self.nsrs.values()):
+            for vnfr in list(nsr.vnfrs.values()):
+                if vnfr.id == vnfr_id:
+                    return nsr
+        return None
+
+    def delete_vnfr(self, vnfr_id):
+        """ Delete VNFR  with the passed id"""
+        del self._vnfrs[vnfr_id]
+
+    def get_nsd_ref(self, nsd_id):
+        """ Get network service descriptor for the passed nsd_id
+            with a reference"""
+        nsd = self.get_nsd(nsd_id)
+        nsd.ref()
+        return nsd
+
    @asyncio.coroutine
    def get_nsr_config(self, nsd_id):
        """ Return the first configured NSR that references nsd_id, else None. """
        xpath = "C,/nsr:ns-instance-config"
        results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)

        for result in results:
            entry = yield from result
            ns_instance_config = entry.result

            # Return on the first NSR whose embedded NSD matches.
            for nsr in ns_instance_config.nsr:
                if nsr.nsd.id == nsd_id:
                    return nsr

        return None
+
+    @asyncio.coroutine
+    def nsd_unref_by_nsr_id(self, nsr_id):
+        """ Unref the network service descriptor based on NSR id """
+        self._log.debug("NSR Unref called for Nsr Id:%s", nsr_id)
+        if nsr_id in self._nsrs:
+            nsr = self._nsrs[nsr_id]
+
+            try:
+                nsd = self.get_nsd(nsr.nsd_id)
+                self._log.debug("Releasing ref on NSD %s held by NSR %s - Curr %d",
+                                nsd.id, nsr.id, nsd.ref_count)
+                nsd.unref()
+            except NetworkServiceDescriptorError:
+                # We store a copy of NSD in NSR and the NSD in nsd-catalog
+                # could be deleted
+                pass
+
+        else:
+            self._log.error("Cannot find NSR with id %s", nsr_id)
+            raise NetworkServiceDescriptorUnrefError("No NSR with id" % nsr_id)
+
+    @asyncio.coroutine
+    def nsd_unref(self, nsd_id):
+        """ Unref the network service descriptor associated with the id """
+        nsd = self.get_nsd(nsd_id)
+        nsd.unref()
+
+    def get_nsd(self, nsd_id):
+        """ Get network service descriptor for the passed nsd_id"""
+        if nsd_id not in self._nsds:
+            self._log.error("Cannot find NSD id:%s", nsd_id)
+            raise NetworkServiceDescriptorError("Cannot find NSD id:%s", nsd_id)
+
+        return self._nsds[nsd_id]
+
    def create_nsd(self, nsd_msg):
        """ Create a network service descriptor.

        Args:
            nsd_msg: NSD config message; nsd_msg.id must not already be cached.

        Returns:
            The new NetworkServiceDescriptor wrapper.

        Raises:
            NetworkServiceDescriptorError: on duplicate id.
        """
        self._log.debug("Create network service descriptor - %s", nsd_msg)
        if nsd_msg.id in self._nsds:
            self._log.error("Cannot create NSD %s -NSD ID already exists", nsd_msg)
            raise NetworkServiceDescriptorError("NSD already exists-%s", nsd_msg.id)

        nsd = NetworkServiceDescriptor(
                self._dts,
                self._log,
                self._loop,
                nsd_msg,
                self
                )
        self._nsds[nsd_msg.id] = nsd

        return nsd
+
+    def update_nsd(self, nsd):
+        """ update the Network service descriptor """
+        self._log.debug("Update network service descriptor - %s", nsd)
+        if nsd.id not in self._nsds:
+            self._log.debug("No NSD found - creating NSD id = %s", nsd.id)
+            self.create_nsd(nsd)
+        else:
+            self._log.debug("Updating NSD id = %s, nsd = %s", nsd.id, nsd)
+            self._nsds[nsd.id].update(nsd)  
+
+    def delete_nsd(self, nsd_id):
+        """ Delete the Network service descriptor with the passed id """
+        self._log.debug("Deleting the network service descriptor - %s", nsd_id)
+        if nsd_id not in self._nsds:
+            self._log.debug("Delete NSD failed - cannot find nsd-id %s", nsd_id)
+            raise NetworkServiceDescriptorNotFound("Cannot find %s", nsd_id)
+
+        if nsd_id not in self._nsds:
+            self._log.debug("Cannot delete NSD id %s reference exists %s",
+                            nsd_id,
+                            self._nsds[nsd_id].ref_count)
+            raise NetworkServiceDescriptorRefCountExists(
+                "Cannot delete :%s, ref_count:%s",
+                nsd_id,
+                self._nsds[nsd_id].ref_count)
+
+        del self._nsds[nsd_id]
+
+    def get_vnfd_config(self, xact):
+        vnfd_dts_reg = self._vnfd_dts_handler.regh
+        for cfg in vnfd_dts_reg.get_xact_elements(xact):
+            if cfg.id not in self._vnfds:
+                self.create_vnfd(cfg)
+
+    def get_vnfd(self, vnfd_id, xact):
+        """ Get virtual network function descriptor for the passed vnfd_id"""
+        if vnfd_id not in self._vnfds:
+            self._log.error("Cannot find VNFD id:%s", vnfd_id)
+            self.get_vnfd_config(xact)
+
+            if vnfd_id not in self._vnfds:
+                self._log.error("Cannot find VNFD id:%s", vnfd_id)
+                raise VnfDescriptorError("Cannot find VNFD id:%s", vnfd_id)
+
+        return self._vnfds[vnfd_id]
+
+    def create_vnfd(self, vnfd):
+        """ Create a virtual network function descriptor """
+        self._log.debug("Create virtual network function descriptor - %s", vnfd)
+        if vnfd.id in self._vnfds:
+            self._log.error("Cannot create VNFD %s -VNFD ID already exists", vnfd)
+            raise VnfDescriptorError("VNFD already exists-%s", vnfd.id)
+
+        self._vnfds[vnfd.id] = vnfd
+        return self._vnfds[vnfd.id]
+
+    def update_vnfd(self, vnfd):
+        """ Update the virtual network function descriptor """
+        self._log.debug("Update virtual network function descriptor- %s", vnfd)
+
+        # Hack to remove duplicates from leaf-lists - to be fixed by RIFT-6511
+        for ivld in vnfd.internal_vld:
+            ivld.internal_connection_point_ref = list(set(ivld.internal_connection_point_ref))
+
+        if vnfd.id not in self._vnfds:
+            self._log.debug("No VNFD found - creating VNFD id = %s", vnfd.id)
+            self.create_vnfd(vnfd)
+        else:
+            self._log.debug("Updating VNFD id = %s, vnfd = %s", vnfd.id, vnfd)
+            self._vnfds[vnfd.id] = vnfd
+
+    @asyncio.coroutine
+    def delete_vnfd(self, vnfd_id):
+        """ Delete the virtual network function descriptor with the passed id """
+        self._log.debug("Deleting the virtual network function descriptor - %s", vnfd_id)
+        if vnfd_id not in self._vnfds:
+            self._log.debug("Delete VNFD failed - cannot find vnfd-id %s", vnfd_id)
+            raise VnfDescriptorError("Cannot find %s", vnfd_id)
+
+        del self._vnfds[vnfd_id]
+
+    def nsd_in_use(self, nsd_id):
+        """ Is the NSD with the passed id in use """
+        self._log.debug("Is this NSD in use - msg:%s", nsd_id)
+        if nsd_id in self._nsds:
+            return self._nsds[nsd_id].in_use()
+        return False
+
+    @asyncio.coroutine
+    def publish_nsr(self, xact, path, msg):
+        """ Publish a NSR """
+        self._log.debug("Publish NSR with path %s, msg %s",
+                        path, msg)
+        yield from self.nsr_handler.update(xact, path, msg)
+
    @asyncio.coroutine
    def unpublish_nsr(self, xact, path):
        """ Un Publish an NSR.

        NOTE(review): the delete() arguments here are (path, xact), while
        NsmRecordsPublisherProxy calls the same kind of handler as
        delete(xact, path). One of the two call sites is likely wrong —
        verify against the publisher's delete() signature before changing.
        """
        self._log.debug("Publishing delete NSR with path %s", path)
        yield from self.nsr_handler.delete(path, xact)
+
+    def vnfr_is_ready(self, vnfr_id):
+        """ VNFR with the id is ready """
+        self._log.debug("VNFR id %s ready", vnfr_id)
+        if vnfr_id not in self._vnfds:
+            err = "Did not find VNFR ID with id %s" % vnfr_id
+            self._log.critical("err")
+            raise VirtualNetworkFunctionRecordError(err)
+        self._vnfrs[vnfr_id].is_ready()
+
    @asyncio.coroutine
    def get_nsd_refcount(self, nsd_id):
        """ Return (xpath, msg) ref-count entries for NSDs in this NSM.

        Args:
            nsd_id: a specific NSD id, or None/"" to report every cached NSD.

        Returns:
            A list of (xpath, RwNsrYang ref-count message) tuples; empty when
            nsd_id is given but unknown.
        """

        def nsd_refcount_xpath(nsd_id):
            """ xpath for ref count entry """
            return (NsdRefCountDtsHandler.XPATH +
                    "[rw-nsr:nsd-id-ref = '{}']").format(nsd_id)

        nsd_list = []
        if nsd_id is None or nsd_id == "":
            # No filter: report the ref count of every cached NSD.
            for nsd in self._nsds.values():
                nsd_msg = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount()
                nsd_msg.nsd_id_ref = nsd.id
                nsd_msg.instance_ref_count = nsd.ref_count
                nsd_list.append((nsd_refcount_xpath(nsd.id), nsd_msg))
        elif nsd_id in self._nsds:
            nsd_msg = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount()
            nsd_msg.nsd_id_ref = self._nsds[nsd_id].id
            nsd_msg.instance_ref_count = self._nsds[nsd_id].ref_count
            nsd_list.append((nsd_refcount_xpath(nsd_id), nsd_msg))

        return nsd_list
+
    @asyncio.coroutine
    def terminate_ns(self, nsr_id, xact):
        """
        Terminate network service for the given NSR Id.

        Sequence: terminate the NSR's resources, drop the NSD reference,
        unpublish the NSR op-data, then remove the NSR from the cache.
        """

        # Terminate the instances/networks associated with this nw service
        self._log.debug("Terminating the network service %s", nsr_id)
        yield from self._nsrs[nsr_id].terminate()

        # Unref the NSD
        yield from self.nsd_unref_by_nsr_id(nsr_id)

        # Unpublish the NSR record
        self._log.debug("Unpublishing the network service %s", nsr_id)
        yield from self._nsrs[nsr_id].unpublish(xact)

        # Finally delete the NS instance from this NS Manager
        self._log.debug("Deletng the network service %s", nsr_id)
        self.delete_nsr(nsr_id)
+
+
class NsmRecordsPublisherProxy(object):
    """Publisher facade handed to plugin objects.

    Exposes publish/unpublish operations for NSR, VNFR and VLR records by
    delegating to the three underlying DTS publication handlers.
    """

    def __init__(self, dts, log, loop, nsr_pub_hdlr, vnfr_pub_hdlr, vlr_pub_hdlr):
        self._dts = dts
        self._log = log
        self._loop = loop
        self._nsr_pub_hdlr = nsr_pub_hdlr
        self._vnfr_pub_hdlr = vnfr_pub_hdlr
        self._vlr_pub_hdlr = vlr_pub_hdlr

    @asyncio.coroutine
    def publish_nsr(self, xact, nsr):
        """Publish an NSR at its record xpath."""
        xpath = NetworkServiceRecord.xpath_from_nsr(nsr)
        result = yield from self._nsr_pub_hdlr.update(xact, xpath, nsr)
        return result

    @asyncio.coroutine
    def unpublish_nsr(self, xact, nsr):
        """Unpublish an NSR from its record xpath."""
        xpath = NetworkServiceRecord.xpath_from_nsr(nsr)
        result = yield from self._nsr_pub_hdlr.delete(xact, xpath)
        return result

    @asyncio.coroutine
    def publish_vnfr(self, xact, vnfr):
        """Publish a VNFR at its record xpath."""
        xpath = VirtualNetworkFunctionRecord.vnfr_xpath(vnfr)
        result = yield from self._vnfr_pub_hdlr.update(xact, xpath, vnfr)
        return result

    @asyncio.coroutine
    def unpublish_vnfr(self, xact, vnfr):
        """Unpublish a VNFR from its record xpath."""
        xpath = VirtualNetworkFunctionRecord.vnfr_xpath(vnfr)
        result = yield from self._vnfr_pub_hdlr.delete(xact, xpath)
        return result

    @asyncio.coroutine
    def publish_vlr(self, xact, vlr):
        """Publish a VLR at its record xpath."""
        xpath = VirtualLinkRecord.vlr_xpath(vlr)
        result = yield from self._vlr_pub_hdlr.update(xact, xpath, vlr)
        return result

    @asyncio.coroutine
    def unpublish_vlr(self, xact, vlr):
        """Unpublish a VLR from its record xpath."""
        xpath = VirtualLinkRecord.vlr_xpath(vlr)
        result = yield from self._vlr_pub_hdlr.delete(xact, xpath)
        return result
+
+
class ScalingRpcHandler(mano_dts.DtsHandler):
    """ DTS handler for the NS scale-in / scale-out RPCs.

    Registers as publisher for the exec-scale-in/out RPC xpaths and forwards
    each request to the optional callback(xact, msg, action).
    """
    SCALE_IN_INPUT_XPATH = "I,/nsr:exec-scale-in"
    SCALE_IN_OUTPUT_XPATH = "O,/nsr:exec-scale-in"

    SCALE_OUT_INPUT_XPATH = "I,/nsr:exec-scale-out"
    SCALE_OUT_OUTPUT_XPATH = "O,/nsr:exec-scale-out"

    ACTION = Enum('ACTION', 'SCALE_IN SCALE_OUT')

    def __init__(self, log, dts, loop, callback=None):
        """
        Args:
            callback: optional callable(xact, msg, action) invoked per RPC.
        """
        super().__init__(log, dts, loop)
        self.callback = callback
        # Last auto-assigned instance id, keyed by scaling group name.
        self.last_instance_id = defaultdict(int)

    @asyncio.coroutine
    def register(self):
        """ Register prepare callbacks for both scaling RPC xpaths. """

        @asyncio.coroutine
        def on_scale_in_prepare(xact_info, action, ks_path, msg):
            assert action == rwdts.QueryAction.RPC

            try:
                if self.callback:
                    self.callback(xact_info.xact, msg, self.ACTION.SCALE_IN)

                rpc_op = NsrYang.YangOutput_Nsr_ExecScaleIn.from_dict({
                      "instance_id": msg.instance_id})

                xact_info.respond_xpath(
                    rwdts.XactRspCode.ACK,
                    self.__class__.SCALE_IN_OUTPUT_XPATH,
                    rpc_op)

            except Exception as e:
                self.log.exception(e)
                xact_info.respond_xpath(
                    rwdts.XactRspCode.NACK,
                    self.__class__.SCALE_IN_OUTPUT_XPATH)

        @asyncio.coroutine
        def on_scale_out_prepare(xact_info, action, ks_path, msg):
            assert action == rwdts.QueryAction.RPC

            try:
                scaling_group = msg.scaling_group_name_ref
                # Auto-assign the next instance id when the caller omitted one.
                # Fix: this previously read/updated self.last_instance_id with
                # the undefined name 'scale_group', raising NameError at
                # runtime whenever instance_id was unset.
                if not msg.instance_id:
                    last_instance_id = self.last_instance_id[scaling_group]
                    msg.instance_id = last_instance_id + 1
                    self.last_instance_id[scaling_group] += 1

                if self.callback:
                    self.callback(xact_info.xact, msg, self.ACTION.SCALE_OUT)

                rpc_op = NsrYang.YangOutput_Nsr_ExecScaleOut.from_dict({
                      "instance_id": msg.instance_id})

                xact_info.respond_xpath(
                    rwdts.XactRspCode.ACK,
                    self.__class__.SCALE_OUT_OUTPUT_XPATH,
                    rpc_op)

            except Exception as e:
                self.log.exception(e)
                xact_info.respond_xpath(
                      rwdts.XactRspCode.NACK,
                      self.__class__.SCALE_OUT_OUTPUT_XPATH)

        scale_in_hdl = rift.tasklets.DTS.RegistrationHandler(
              on_prepare=on_scale_in_prepare)
        scale_out_hdl = rift.tasklets.DTS.RegistrationHandler(
              on_prepare=on_scale_out_prepare)

        with self.dts.group_create() as group:
            group.register(
                  xpath=self.__class__.SCALE_IN_INPUT_XPATH,
                  handler=scale_in_hdl,
                  flags=rwdts.Flag.PUBLISHER)
            group.register(
                  xpath=self.__class__.SCALE_OUT_INPUT_XPATH,
                  handler=scale_out_hdl,
                  flags=rwdts.Flag.PUBLISHER)
+
+
class NsmTasklet(rift.tasklets.Tasklet):
    """
    The network service manager tasklet.

    Wires together the DTS connection, the NSR/VNFR/VLR publishers, the
    RO plugin selector, the cloud-account subscriber, the VNFFG manager and
    the NsManager itself, driven by DTS state-change callbacks.
    """
    def __init__(self, *args, **kwargs):
        super(NsmTasklet, self).__init__(*args, **kwargs)
        self.rwlog.set_category("rw-mano-log")
        self.rwlog.set_subcategory("nsm")

        # Collaborators are created lazily in init(); None until then.
        self._dts = None
        self._nsm = None

        self._ro_plugin_selector = None
        self._vnffgmgr = None

        self._nsr_handler = None
        self._vnfr_pub_handler = None
        self._vlr_pub_handler = None
        self._vnfd_pub_handler = None
        self._scale_cfg_handler = None

        self._records_publisher_proxy = None

    def start(self):
        """ The task start callback """
        super(NsmTasklet, self).start()
        self.log.info("Starting NsmTasklet")

        # Connect to DTS; init()/run() are driven via on_dts_state_change.
        self.log.debug("Registering with dts")
        self._dts = rift.tasklets.DTS(self.tasklet_info,
                                      RwNsmYang.get_schema(),
                                      self.loop,
                                      self.on_dts_state_change)

        self.log.debug("Created DTS Api GI Object: %s", self._dts)

    def stop(self):
        # Tear down the DTS connection; re-raise so the framework sees
        # shutdown failures.
        try:
            self._dts.deinit()
        except Exception:
            print("Caught Exception in NSM stop:", sys.exc_info()[0])
            raise

    def on_instance_started(self):
        """ Task instance started callback """
        self.log.debug("Got instance started callback")

    @asyncio.coroutine
    def init(self):
        """ Task init callback — builds and registers every collaborator. """
        # NOTE(review): debug text below looks copy-pasted from
        # on_instance_started; left unchanged as it is a runtime string.
        self.log.debug("Got instance started callback")

        self.log.debug("creating config account handler")

        # Publishers for NSR op-data, VNFR and VLR records.
        self._nsr_pub_handler = publisher.NsrOpDataDtsHandler(self._dts, self.log, self.loop)
        yield from self._nsr_pub_handler.register()

        self._vnfr_pub_handler = publisher.VnfrPublisherDtsHandler(self._dts, self.log, self.loop)
        yield from self._vnfr_pub_handler.register()

        self._vlr_pub_handler = publisher.VlrPublisherDtsHandler(self._dts, self.log, self.loop)
        yield from self._vlr_pub_handler.register()

        # TLS material for the VNFD publisher comes from the bootstrap
        # manifest.
        manifest = self.tasklet_info.get_pb_manifest()
        use_ssl = manifest.bootstrap_phase.rwsecurity.use_ssl
        ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
        ssl_key = manifest.bootstrap_phase.rwsecurity.key

        self._vnfd_pub_handler = publisher.VnfdPublisher(use_ssl, ssl_cert, ssl_key, self.loop)

        self._records_publisher_proxy = NsmRecordsPublisherProxy(
                self._dts,
                self.log,
                self.loop,
                self._nsr_pub_handler,
                self._vnfr_pub_handler,
                self._vlr_pub_handler,
                )

        # Register the NSM to receive the nsm plugin
        # when cloud account is configured
        self._ro_plugin_selector = cloud.ROAccountPluginSelector(
                self._dts,
                self.log,
                self.loop,
                self._records_publisher_proxy,
                )
        yield from self._ro_plugin_selector.register()

        self._cloud_account_handler = cloud.CloudAccountConfigSubscriber(
                self._log,
                self._dts,
                self.log_hdl)

        yield from self._cloud_account_handler.register()

        self._vnffgmgr = rwvnffgmgr.VnffgMgr(self._dts, self.log, self.log_hdl, self.loop)
        yield from self._vnffgmgr.register()

        # The NsManager ties all the handlers above together.
        self._nsm = NsManager(
                self._dts,
                self.log,
                self.loop,
                self._nsr_pub_handler,
                self._vnfr_pub_handler,
                self._vlr_pub_handler,
                self._ro_plugin_selector,
                self._vnffgmgr,
                self._vnfd_pub_handler,
                self._cloud_account_handler
                )

        yield from self._nsm.register()

    @asyncio.coroutine
    def run(self):
        """ Task run callback """
        pass

    @asyncio.coroutine
    def on_dts_state_change(self, state):
        """Take action according to current dts state to transition
        application into the corresponding application state

        Arguments
            state - current dts state
        """
        # DTS state -> next DTS state to request.
        switch = {
            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
            rwdts.State.CONFIG: rwdts.State.RUN,
        }

        # DTS state -> coroutine to run before transitioning.
        handlers = {
            rwdts.State.INIT: self.init,
            rwdts.State.RUN: self.run,
        }

        # Transition application to next state
        handler = handlers.get(state, None)
        if handler is not None:
            yield from handler()

        # Transition dts to next state
        next_state = switch.get(state, None)
        if next_state is not None:
            self.log.debug("Changing state to %s", next_state)
            self._dts.handle.set_state(next_state)
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py
new file mode 100755
index 0000000..0ebe9df
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py
@@ -0,0 +1,422 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwsdnYang,
+    RwTypes,
+    ProtobufC,
+)
+
+from gi.repository.RwTypes import RwStatus
+import rw_peas
+import rift.tasklets
+
+class SdnGetPluginError(Exception):
+    """ Raised when the SDN plugin could not be fetched/loaded """
+    pass
+
+
+class SdnGetInterfaceError(Exception):
+    """ Raised when the SDN plugin interface could not be fetched """
+    pass
+
+
+class SdnAccountError(Exception):
+    """ Raised on failure to create/delete/update an SDN Account """
+    pass
+
+class VnffgrDoesNotExist(Exception):
+    """ Raised when a requested VNFFGR id is not known to the VNFFG manager """
+    pass
+
+class VnffgrAlreadyExist(Exception):
+    """ Raised when creating a VNFFGR whose id already exists """
+    pass
+
+class VnffgrCreationFailed(Exception):
+    """ Raised when instantiation of a VNFFGR through the SDN plugin fails """
+    pass
+
+
+class VnffgrUpdateFailed(Exception):
+    """ Raised when refreshing VNFFGR state from the SDN plugin fails """
+    pass
+
+class VnffgMgr(object):
+    """ Implements the interface to backend plugins to fetch topology """
+    def __init__(self, dts, log, log_hdl, loop):
+        self._account = {}
+        self._dts = dts
+        self._log = log
+        self._log_hdl = log_hdl
+        self._loop = loop
+        self._sdn = {}
+        self._sdn_handler = SDNAccountDtsHandler(self._dts,self._log,self)
+        self._vnffgr_list = {}
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self._sdn_handler.register()
+
+    def set_sdn_account(self,account):
+        if (account.name in self._account):
+            self._log.error("SDN Account is already set")
+        else:
+            sdn_account           = RwsdnYang.SDNAccount()
+            sdn_account.from_dict(account.as_dict())
+            sdn_account.name = account.name
+            self._account[account.name] = sdn_account
+            self._log.debug("Account set is %s , %s",type(self._account), self._account)
+
+    def del_sdn_account(self, name):
+        self._log.debug("Account deleted is %s , %s", type(self._account), name)
+        del self._account[name]
+
+    def update_sdn_account(self,account):
+        self._log.debug("Account updated is %s , %s", type(self._account), account)
+        if account.name in self._account:
+            sdn_account = self._account[account.name]
+
+            sdn_account.from_dict(
+                account.as_dict(),
+                ignore_missing_keys=True,
+                )
+            self._account[account.name] = sdn_account
+
+    def get_sdn_account(self, name):
+        """
+        Creates an object for class RwsdnYang.SdnAccount()
+        """
+        if (name in self._account):
+            return self._account[name]
+        else:
+            self._log.error("SDN account is not configured")
+
+
+    def get_sdn_plugin(self,name):
+        """
+        Loads rw.sdn plugin via libpeas
+        """
+        if (name in self._sdn):
+            return self._sdn[name]
+        account = self.get_sdn_account(name)
+        plugin_name = getattr(account, account.account_type).plugin_name
+        self._log.debug("SDN plugin being created")
+        plugin = rw_peas.PeasPlugin(plugin_name, 'RwSdn-1.0')
+        engine, info, extension = plugin()
+
+        self._sdn[name] = plugin.get_interface("Topology")
+        try:
+            rc = self._sdn[name].init(self._log_hdl)
+            assert rc == RwStatus.SUCCESS
+        except:
+            self._log.error("ERROR:SDN plugin instantiation failed ")
+        else:
+            self._log.debug("SDN plugin successfully instantiated")
+        return self._sdn[name]
+
+    def fetch_vnffgr(self,vnffgr_id):
+        if vnffgr_id not in self._vnffgr_list:
+            self._log.error("VNFFGR with id %s not present in VNFFGMgr", vnffgr_id)
+            msg = "VNFFGR with id {} not present in VNFFGMgr".format(vnffgr_id)
+            raise VnffgrDoesNotExist(msg)
+        self.update_vnffgrs(self._vnffgr_list[vnffgr_id].sdn_account)
+        vnffgr = self._vnffgr_list[vnffgr_id].deep_copy()
+        self._log.debug("VNFFGR for id %s is %s",vnffgr_id,vnffgr)
+        return vnffgr
+
+    def create_vnffgr(self, vnffgr,classifier_list,sff_list):
+        """
+        """
+        self._log.debug("Received VNFFG chain Create msg %s",vnffgr)
+        if vnffgr.id in self._vnffgr_list:
+            self._log.error("VNFFGR with id %s already present in VNFFGMgr", vnffgr.id)
+            vnffgr.operational_status = 'failed'
+            msg = "VNFFGR with id {} already present in VNFFGMgr".format(vnffgr.id)
+            raise VnffgrAlreadyExist(msg)
+
+        self._vnffgr_list[vnffgr.id] = vnffgr
+        vnffgr.operational_status = 'init'
+        if len(self._account) == 0:
+            self._log.error("SDN Account not configured")
+            vnffgr.operational_status = 'failed'
+            return
+        if vnffgr.sdn_account:
+            sdn_acct_name = vnffgr.sdn_account
+        else:
+            self._log.error("SDN Account is not associated to create VNFFGR")
+            # TODO Fail the VNFFGR creation if SDN account is not associated
+            #vnffgr.operational_status = 'failed'
+            #msg = "SDN Account is not associated to create VNFFGR"
+            #raise VnffgrCreationFailed(msg)
+            sdn_account = [sdn_account.name for _,sdn_account in self._account.items()]
+            sdn_acct_name = sdn_account[0]
+            vnffgr.sdn_account = sdn_acct_name
+        sdn_plugin = self.get_sdn_plugin(sdn_acct_name)
+
+        for rsp in vnffgr.rsp:
+            vnffg = RwsdnYang.VNFFGChain()
+            vnffg.name = rsp.name
+            vnffg.classifier_name = rsp.classifier_name
+
+            vnfr_list = list()
+            for index,cp_ref in enumerate(rsp.vnfr_connection_point_ref):
+                cpath = vnffg.vnf_chain_path.add()
+                cpath.order=cp_ref.hop_number
+                cpath.service_function_type = cp_ref.service_function_type
+                cpath.nsh_aware=True
+                cpath.transport_type = 'vxlan-gpe'
+
+                vnfr=cpath.vnfr_ids.add()
+                vnfr.vnfr_id = cp_ref.vnfr_id_ref
+                vnfr.vnfr_name = cp_ref.vnfr_name_ref
+                vnfr.mgmt_address = cp_ref.connection_point_params.mgmt_address
+                vnfr.mgmt_port = 5000
+                vnfr_list.append(vnfr)
+            
+                vdu = vnfr.vdu_list.add()
+                vdu.name = cp_ref.connection_point_params.name
+                vdu.port_id = cp_ref.connection_point_params.port_id
+                vdu.vm_id = cp_ref.connection_point_params.vm_id
+                vdu.address = cp_ref.connection_point_params.address
+                vdu.port =  cp_ref.connection_point_params.port
+
+            for sff in sff_list.values():
+                _sff = vnffg.sff.add()
+                _sff.from_dict(sff.as_dict())
+                if sff.function_type == 'SFF':
+                    for vnfr in vnfr_list:
+                        vnfr.sff_name = sff.name
+                self._log.debug("Recevied SFF %s, Created SFF is %s",sff, _sff)
+
+            self._log.debug("VNFFG chain msg is %s",vnffg)
+            rc,rs = sdn_plugin.create_vnffg_chain(self._account[sdn_acct_name],vnffg)
+            if rc != RwTypes.RwStatus.SUCCESS:
+                vnffgr.operational_status = 'failed'
+                msg = "Instantiation of VNFFGR with id {} failed".format(vnffgr.id)
+                raise VnffgrCreationFailed(msg)
+
+            self._log.info("VNFFG chain created successfully for rsp with id %s",rsp.id)
+
+
+        meta = {}
+        if(len(classifier_list) == 2):
+            meta[vnffgr.classifier[0].id] = '0x' + ''.join(str("%0.2X"%int(i)) for i in vnffgr.classifier[1].ip_address.split('.'))
+            meta[vnffgr.classifier[1].id] = '0x' + ''.join(str("%0.2X"%int(i)) for i in vnffgr.classifier[0].ip_address.split('.'))
+            
+        self._log.debug("VNFFG Meta VNFFG chain is {}".format(meta))
+        
+        for classifier in classifier_list:
+            vnffgr_cl = [_classifier  for _classifier in vnffgr.classifier if classifier.id == _classifier.id]
+            if len(vnffgr_cl) > 0:
+                cl_rsp_name = vnffgr_cl[0].rsp_name
+            else:
+                self._log.error("No RSP wiht name %s found; Skipping classifier %s creation",classifier.rsp_id_ref,classifier.name)
+                continue
+            vnffgcl = RwsdnYang.VNFFGClassifier()
+            vnffgcl.name = classifier.name
+            vnffgcl.rsp_name = cl_rsp_name
+            vnffgcl.port_id = vnffgr_cl[0].port_id
+            vnffgcl.vm_id = vnffgr_cl[0].vm_id
+            # Get the symmetric classifier endpoint ip and set it in nsh ctx1
+            
+            vnffgcl.vnffg_metadata.ctx1 =  meta.get(vnffgr_cl[0].id,'0') 
+            vnffgcl.vnffg_metadata.ctx2 = '0'
+            vnffgcl.vnffg_metadata.ctx3 = '0'
+            vnffgcl.vnffg_metadata.ctx4 = '0'
+            if vnffgr_cl[0].has_field('sff_name'):
+                vnffgcl.sff_name = vnffgr_cl[0].sff_name
+            for index,match_rule in enumerate(classifier.match_attributes):
+                acl = vnffgcl.match_attributes.add()
+                #acl.name = vnffgcl.name + str(index)
+                acl.name = match_rule.id
+                acl.ip_proto  = match_rule.ip_proto
+                acl.source_ip_address = match_rule.source_ip_address + '/32'
+                acl.source_port = match_rule.source_port
+                acl.destination_ip_address = match_rule.destination_ip_address + '/32'
+                acl.destination_port = match_rule.destination_port
+
+            self._log.debug(" Creating VNFFG Classifier Classifier %s for RSP: %s",vnffgcl.name,vnffgcl.rsp_name)
+            rc,rs = sdn_plugin.create_vnffg_classifier(self._account[sdn_acct_name],vnffgcl)
+            if rc != RwTypes.RwStatus.SUCCESS:
+                self._log.error("VNFFG Classifier cretaion failed for Classifier %s for RSP ID: %s",classifier.name,classifier.rsp_id_ref)
+                #vnffgr.operational_status = 'failed'
+                #msg = "Instantiation of VNFFGR with id {} failed".format(vnffgr.id)
+                #raise VnffgrCreationFailed(msg)
+
+        vnffgr.operational_status = 'running'
+        self.update_vnffgrs(vnffgr.sdn_account)
+        return vnffgr
+
+    def update_vnffgrs(self,sdn_acct_name):
+        """
+        Update VNFFGR by reading data from SDN Plugin
+        """
+        sdn_plugin = self.get_sdn_plugin(sdn_acct_name)
+        rc,rs = sdn_plugin.get_vnffg_rendered_paths(self._account[sdn_acct_name])
+        if rc != RwTypes.RwStatus.SUCCESS:
+            msg = "Reading of VNFFGR from SDN Plugin failed"
+            raise VnffgrUpdateFailed(msg)
+
+        vnffgr_list = [_vnffgr for _vnffgr in self._vnffgr_list.values()  if _vnffgr.sdn_account == sdn_acct_name and _vnffgr.operational_status == 'running']
+
+        for _vnffgr in vnffgr_list:
+            for _vnffgr_rsp in _vnffgr.rsp:
+                vnffg_rsp_list = [vnffg_rsp for vnffg_rsp in rs.vnffg_rendered_path if vnffg_rsp.name == _vnffgr_rsp.name]
+                if vnffg_rsp_list is not None and len(vnffg_rsp_list) > 0:
+                    vnffg_rsp = vnffg_rsp_list[0]
+                    if len(vnffg_rsp.rendered_path_hop) != len(_vnffgr_rsp.vnfr_connection_point_ref):
+                        _vnffgr.operational_status = 'failed'
+                        self._log.error("Received hop count %d doesnt match the VNFFGD hop count %d", len(vnffg_rsp.rendered_path_hop),
+                                         len(_vnffgr_rsp.vnfr_connection_point_ref))
+                        msg = "Fetching of VNFFGR with id {} failed".format(_vnffgr.id)
+                        raise VnffgrUpdateFailed(msg)
+                    _vnffgr_rsp.path_id =  vnffg_rsp.path_id
+                    for index, rendered_hop in enumerate(vnffg_rsp.rendered_path_hop):
+                        for  vnfr_cp_ref in _vnffgr_rsp.vnfr_connection_point_ref:
+                            if rendered_hop.vnfr_name == vnfr_cp_ref.vnfr_name_ref:
+                               vnfr_cp_ref.hop_number = rendered_hop.hop_number
+                               vnfr_cp_ref.service_index = rendered_hop.service_index
+                               vnfr_cp_ref.service_function_forwarder.name = rendered_hop.service_function_forwarder.name
+                               vnfr_cp_ref.service_function_forwarder.ip_address = rendered_hop.service_function_forwarder.ip_address
+                               vnfr_cp_ref.service_function_forwarder.port = rendered_hop.service_function_forwarder.port
+                else:
+                    _vnffgr.operational_status = 'failed'
+                    self._log.error("VNFFGR RSP with name %s in VNFFG %s not found",_vnffgr_rsp.name, _vnffgr.id)
+                    msg = "Fetching of VNFFGR with name {} failed".format(_vnffgr_rsp.name)
+                    raise VnffgrUpdateFailed(msg)
+
+
+    def terminate_vnffgr(self,vnffgr_id,sdn_account_name = None):
+        """
+        Deletet the VNFFG chain
+        """
+        if vnffgr_id not in self._vnffgr_list:
+            self._log.error("VNFFGR with id %s not present in VNFFGMgr during termination", vnffgr_id)
+            msg = "VNFFGR with id {} not present in VNFFGMgr during termination".format(vnffgr_id)
+            return
+            #raise VnffgrDoesNotExist(msg)
+        self._log.info("Received VNFFG chain terminate for id %s",vnffgr_id)
+        if sdn_account_name is None:
+            sdn_account = [sdn_account.name for _,sdn_account in self._account.items()]
+            sdn_account_name = sdn_account[0]
+        sdn_plugin = self.get_sdn_plugin(sdn_account_name)
+        sdn_plugin.terminate_vnffg_chain(self._account[sdn_account_name],vnffgr_id)
+        sdn_plugin.terminate_vnffg_classifier(self._account[sdn_account_name],vnffgr_id)
+        del self._vnffgr_list[vnffgr_id]
+
+class SDNAccountDtsHandler(object):
+    """ DTS subscriber for SDN account configuration (rw-sdn:sdn-account).
+
+    Forwards account add/update/delete events to the parent VnffgMgr.
+    """
+    XPATH = "C,/rw-sdn:sdn-account"
+
+    def __init__(self, dts, log, parent):
+        self._dts = dts
+        self._log = log
+        # The owning VnffgMgr; receives set/update/del callbacks.
+        self._parent = parent
+
+        # Local record of configured accounts, keyed by account name.
+        self._sdn_account = {}
+
+    def _set_sdn_account(self, account):
+        """ Record a new SDN account and notify the parent. """
+        self._log.info("Setting sdn account: {}".format(account))
+        if account.name in self._sdn_account:
+            # NOTE(review): despite "Ignoring config", a duplicate account is
+            # still stored and forwarded below — confirm intended.
+            self._log.error("SDN Account with name %s already exists. Ignoring config", account.name);
+        self._sdn_account[account.name]  = account
+        self._parent.set_sdn_account(account)
+
+    def _del_sdn_account(self, account_name):
+        """ Drop the local account record and notify the parent. """
+        self._log.info("Deleting sdn account: {}".format(account_name))
+        del self._sdn_account[account_name]
+
+        self._parent.del_sdn_account(account_name)
+
+    def _update_sdn_account(self, account):
+        """ Forward an account update to the parent. """
+        self._log.info("Updating sdn account: {}".format(account))
+        # No need to update locally saved sdn_account's updated fields, as they
+        # are not used anywhere. Call the parent's update callback.
+        self._parent.update_sdn_account(account)
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register an appconf group for SDN account config with DTS. """
+        def apply_config(dts, acg, xact, action, _):
+            # Apply is a no-op: all real work happens in on_prepare.
+            self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", xact, action)
+            if action == rwdts.AppconfAction.INSTALL and xact.id is None:
+                self._log.debug("No xact handle.  Skipping apply config")
+                return RwTypes.RwStatus.SUCCESS
+
+            return RwTypes.RwStatus.SUCCESS
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare callback from DTS for SDN Account config """
+
+            self._log.info("SDN Cloud account config received: %s", msg)
+
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            if fref.is_field_deleted():
+                # Delete the sdn account record
+                self._del_sdn_account(msg.name)
+            else:
+                # If the account already exists, then this is an update.
+                if msg.name in self._sdn_account:
+                    self._log.debug("SDN account already exists. Invoking on_prepare update request")
+                    if msg.has_field("account_type"):
+                        # account-type is immutable once configured.
+                        errmsg = "Cannot update SDN account's account-type."
+                        self._log.error(errmsg)
+                        xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+                                                   SDNAccountDtsHandler.XPATH,
+                                                   errmsg)
+                        raise SdnAccountError(errmsg)
+
+                    # Update the sdn account record
+                    self._update_sdn_account(msg)
+                else:
+                    self._log.debug("SDN account does not already exist. Invoking on_prepare add request")
+                    if not msg.has_field('account_type'):
+                        # account-type is mandatory for a new account.
+                        errmsg = "New SDN account must contain account-type field."
+                        self._log.error(errmsg)
+                        xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+                                                   SDNAccountDtsHandler.XPATH,
+                                                   errmsg)
+                        raise SdnAccountError(errmsg)
+
+                    # Set the sdn account record
+                    self._set_sdn_account(msg)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+
+        self._log.debug("Registering for Sdn Account config using xpath: %s",
+                        SDNAccountDtsHandler.XPATH,
+                        )
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_config,
+                        )
+
+        with self._dts.appconf_group_create(acg_handler) as acg:
+            acg.register(
+                    xpath=SDNAccountDtsHandler.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+                    on_prepare=on_prepare
+                    )
+
+
+
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/scale_group.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/scale_group.py
new file mode 100644
index 0000000..8bbf894
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/scale_group.py
@@ -0,0 +1,280 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import time
+
+from enum import Enum
+
+from gi.repository import NsdYang, NsrYang
+
+
+class ScalingGroupIndexExists(Exception):
+    """ Raised when creating a scaling group instance whose id already exists """
+    pass
+
+
+class ScaleGroupTrigger(Enum):
+    """ Trigger for scaling config """
+    PRE_SCALE_IN = 1    # before a scale-in operation
+    POST_SCALE_IN = 2   # after a scale-in operation
+    PRE_SCALE_OUT = 3   # before a scale-out operation
+    POST_SCALE_OUT = 4  # after a scale-out operation
+
+
+class ScaleGroupState(Enum):
+    """ Scaling group state  """
+    RUNNING = 1      # no instance is transitioning
+    SCALING_IN = 2   # at least one instance is terminating
+    SCALING_OUT = 3  # at least one instance is initializing
+
+
+class ScalingGroup(object):
+    """ This represents a configured NSR scaling group """
+    def __init__(self, log, group_msg):
+        """ Create a ScalingGroup instance
+
+        This class is responsible for representing a configured scaling group
+        which is present within an NSR.
+
+        :param log: A logger instance
+        :param group_msg: A NSD scaling group pb message
+        """
+        self._log = log
+        self._group_msg = group_msg
+
+        # instance_id -> ScalingGroupInstance
+        self._instances = {}
+
+    def __str__(self):
+        return "ScalingGroup(%s)" % self.name
+
+    @property
+    def name(self):
+        """ Name of the scaling group """
+        return self._group_msg.name
+
+    @property
+    def state(self):
+        """ State of the scaling group """
+        # Derived from instance operational statuses.  NOTE(review): if some
+        # instances are scaling out while others are scaling in, the value
+        # reported is whichever was seen last in iteration order.
+        state = ScaleGroupState.RUNNING
+        for instance in self._instances.values():
+            if instance.operational_status in ["init", "vnf_init_phase"]:
+                self._log.debug("Scaling instance %s in scaling-out state: %s",
+                                instance, instance.operational_status)
+                state = ScaleGroupState.SCALING_OUT
+
+            elif instance.operational_status in ["terminate", "vnf_terminate_phase"]:
+                self._log.debug("Scaling instance %s in scaling-in state: %s",
+                                instance, instance.operational_status)
+                state = ScaleGroupState.SCALING_IN
+
+        return state
+
+    @property
+    def vnf_index_count_map(self):
+        """ The mapping of member_vnf_index_ref to count"""
+        return {mbr.member_vnf_index_ref: mbr.count for mbr in self._group_msg.vnfd_member}
+
+    @property
+    def group_msg(self):
+        """ Return the scale group PB message """
+        return self._group_msg
+
+    @property
+    def min_instance_count(self):
+        """ Minimum (and default) number of instance of the scaling group """
+        return self._group_msg.min_instance_count
+
+    @property
+    def max_instance_count(self):
+        """ Maximum number of instance of the scaling group """
+        return self._group_msg.max_instance_count
+
+    def create_record_msg(self):
+        """ Returns a NSR Scaling group record """
+        msg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord(
+                scaling_group_name_ref=self.name,
+                )
+
+        for instance in self.instances:
+            msg.instance.append(instance.create_record_msg())
+
+        return msg
+
+    @property
+    def instances(self):
+        # All ScalingGroupInstance objects in this group (dict view).
+        return self._instances.values()
+
+    def get_instance(self, instance_id):
+        """ Get a scaling group instance
+
+        :param instance_id: The instance's instance_id
+        """
+        return self._instances[instance_id]
+
+    def create_instance(self, instance_id, is_default=False):
+        """ Create a scaling group instance
+
+        :param instance_id: The new instance's instance_id
+        :raises ScalingGroupIndexExists: if instance_id is already in use
+        """
+        self._log.debug("Creating %s instance instance_id %s ", self, instance_id)
+
+        if instance_id in self._instances:
+            raise ScalingGroupIndexExists("%s instance_id %s already exists" % (self, instance_id))
+
+        instance = ScalingGroupInstance(
+                log=self._log,
+                group_name=self.name,
+                instance_id=instance_id,
+                is_default=is_default,
+                )
+
+        self._instances[instance_id] = instance
+
+        return instance
+
+    def delete_instance(self, instance_id):
+        # Remove the instance record; raises KeyError if unknown.
+        self._log.debug("Deleting %s instance instance_id %s ", self, instance_id)
+        del self._instances[instance_id]
+
+    def trigger_map(self, trigger):
+        """ Map a NsdYang.ScalingTrigger enum to its config-action key.
+
+        Returns None (after logging) for an unrecognized trigger.
+        """
+        trig_map = {
+            NsdYang.ScalingTrigger.PRE_SCALE_IN   : 'pre_scale_in',
+            NsdYang.ScalingTrigger.POST_SCALE_IN  : 'post_scale_in',
+            NsdYang.ScalingTrigger.PRE_SCALE_OUT  : 'pre_scale_out',
+            NsdYang.ScalingTrigger.POST_SCALE_OUT : 'post_scale_out',
+        }
+
+        try:
+            return trig_map[trigger]
+        except Exception as e:
+            self._log.error("Unknown scaling group trigger passed: {}".format(trigger))
+            self._log.exception(e)
+
+    def trigger_config(self, trigger):
+        """ Get the config action for the trigger """
+        # Returns None when the trigger is unknown or no action is configured.
+        self._log.debug("Trigger config {}: {}".format(trigger, self._group_msg))
+        trig = self.trigger_map(trigger)
+        if trig is None:
+            return
+
+        for config in self._group_msg.scaling_config_action:
+            if trig == config.trigger:
+                return config
+
+
+class ScalingGroupInstance(object):
+    """  This class represents a configured NSR Scaling Group instance"""
+
+    valid_status_list = (
+      "init",
+      "vnf_init_phase",
+      "running",
+      "terminate",
+      "vnf_terminate_phase",
+      "terminated",
+      "failed",
+      )
+
+    valid_config_status_list = (
+        "configuring",
+        "configured",
+        "failed",
+    )
+
+    def __init__(self, log, group_name, instance_id, is_default=False):
+        self._log = log
+        self._group_name = group_name
+        self._instance_id = instance_id
+        self._is_default = is_default
+
+        self._vnfrs = {}
+
+        self._create_time = int(time.time())
+        self._op_status = "init"
+        self._config_status = "configuring"
+        self._config_err_msg = None
+
+    def __str__(self):
+        return "ScalingGroupInstance(%s #%s)" % (self._group_name, self.instance_id)
+
+    @property
+    def operational_status(self):
+        return self._op_status
+
+    @operational_status.setter
+    def operational_status(self, op_status):
+        if op_status not in ScalingGroupInstance.valid_status_list:
+            raise ValueError("Invalid scaling group instance status: %s", op_status)
+
+        self._op_status = op_status
+
+    @property
+    def config_status(self):
+        return self._config_status
+
+    @config_status.setter
+    def config_status(self, status):
+        if status not in ScalingGroupInstance.valid_config_status_list:
+            raise ValueError("%s, invalid status: %s",
+                             self, status)
+
+        self._config_status = status
+
+    @property
+    def config_err_msg(self):
+        return self._config_err_msg
+
+    @config_err_msg.setter
+    def config_err_msg(self, msg):
+        if self.config_err_msg is not None:
+            self._log.info("%s, overwriting previous config error msg '%s' with '%s'",
+                           self, self.config_err_msg, msg)
+
+        self._config_err_msg = msg
+
+    @property
+    def instance_id(self):
+        return self._instance_id
+
+    @property
+    def is_default(self):
+        return self._is_default
+
+    @property
+    def vnfrs(self):
+        """ Return all VirtualNetworkFunctionRecord's that have been added"""
+        return self._vnfrs.values()
+
+    def create_record_msg(self):
+        msg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord_Instance(
+                instance_id=self._instance_id,
+                create_time=self._create_time,
+                op_status=self._op_status,
+                config_status=self._config_status,
+                error_msg=self._config_err_msg,
+                is_default=self._is_default
+                )
+
+        for vnfr in self.vnfrs:
+            msg.vnfrs.append(vnfr.id)
+
+        return msg
+
+    def add_vnfr(self, vnfr):
+        """ Add a VirtualNetworkFunctionRecord"""
+        self._log.debug("Added %s to %s", vnfr, self)
+        self._vnfrs[vnfr.id] = vnfr
+
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml
new file mode 100644
index 0000000..ef09f1e
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="1">
+  <data>
+    <cm-config xmlns="http://riftio.com/ns/riftware-1.0/rw-conman">
+      <initiate-nsr-cfg></initiate-nsr-cfg>
+    </cm-config>
+  </data>
+</rpc-reply>
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py
new file mode 100755
index 0000000..11b7127
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py
@@ -0,0 +1,365 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import re
+
+
+class Attribute(collections.namedtuple("Attribute", "module name")):
+    """ Token for a plain attribute part of an xpath: <module>:<name>. """
+    def __repr__(self):
+        return "{}:{}".format(self.module, self.name)
+
+
+class ListElement(collections.namedtuple("List", "module name key value")):
+    """ Token for a keyed list part of an xpath: <module>:<name>[<key>=<value>]. """
+    def __repr__(self):
+        return "{}:{}[{}={}]".format(self.module, self.name, self.key, self.value)
+
+
+def tokenize(xpath):
+    """Return a list of tokens representing an xpath
+
+    The types of xpaths that this selector supports is extremely limited.
+    The xpath is required to be an absolute path delimited by a
+    forward-slash. Each of the parts (elements between delimiters) is
+    treated as one of two possible types:
+
+        - an attribute
+        - a list element
+
+    An attribute is a normal python attribute on an object. A list element
+    is an element within a list, which is identified by a key value (like a
+    yang list, although this is more properly a dict in python).
+
+    Each attribute is expected to have the form,
+
+        <namespace>:<variable-name>
+
+    A valid variable name (or namespace) follows the python regular expression,
+
+        [a-zA-Z0-9-_]+
+
+    A list entry has the form,
+
+        <namespace>:<variable-name>[<namespace>:<variable-name>=<value>]
+
+    The expression in the square brackets is the key of the required
+    element, and the value that that key must have.
+
+    Arguments:
+        xpath - a string containing an xpath expression
+
+    Raises:
+        A ValueError is raised if the xpath cannot be parsed.
+
+    Returns:
+        a list of tokens
+
+    """
+    # define the symbols that are valid for a variable name in yang
+    name = "[a-zA-Z0-9-_]+"
+
+    # define a set of regular expressions for parsing the xpath
+    pattern_attribute = re.compile("({t}):({t})$".format(t=name))
+    pattern_key_value = re.compile("^{t}:({t})\s*=\s*(.*)$".format(t=name))
+    pattern_quote = re.compile("^[\'\"](.*)[\'\"]$")
+    pattern_list = re.compile("^(.*)\[(.*)\]$")
+
+    def dash_to_underscore(text):
+        return text.replace('-', '_')
+
+    # Iterate through the parts of the xpath (NB: because the xpaths are
+    # required to be absolute paths, the first character is going to be the
+    # forward slash. As a result, when the string is split, the first
+    # element with be an empty string).
+    tokens = list()
+    for part in xpath.split("/")[1:]:
+
+        # Test the part to see if it is a attribute
+        result = pattern_attribute.match(part)
+        if result is not None:
+            module, name = result.groups()
+
+            # Convert the dashes to underscores
+            name = dash_to_underscore(name)
+            module = dash_to_underscore(module)
+
+            tokens.append(Attribute(module, name))
+
+            continue
+
+        # Test the part to see if it is a list
+        result = pattern_list.match(part)
+        if result is not None:
+            attribute, keyvalue = result.groups()
+
+            module, name = pattern_attribute.match(attribute).groups()
+            key, value = pattern_key_value.match(keyvalue).groups()
+
+            # Convert the dashes to underscore (but not in the key value)
+            key = dash_to_underscore(key)
+            name = dash_to_underscore(name)
+            module = dash_to_underscore(module)
+
+            result = pattern_quote.match(value)
+            if result is not None:
+                value = result.group(1)
+
+            tokens.append(ListElement(module, name, key, value))
+
+            continue
+
+        raise ValueError("cannot parse '{}'".format(part))
+
+    return tokens
+
+
+class XPathAttribute(object):
+    """
+    This class is used to represent a reference to an attribute. If you use
+    getattr on an attribute, it may give you the value of the attribute rather
+    than a reference to it. What is really wanted is a representation of the
+    attribute so that its value can be both retrieved and set. That is what
+    this class provides.
+    """
+
+    def __init__(self, obj, name):
+        """Create an instance of XPathAttribute
+
+        Arguments:
+            obj  - the object containing the attribute
+            name - the name of an attribute
+
+        Raises:
+            A ValueError is raised if the provided object does not have the
+            associated attribute.
+
+        """
+        if not hasattr(obj, name):
+            msg = "The provided object does not contain the associated attribute"
+            raise ValueError(msg)
+
+        self.obj = obj
+        self.name = name
+
+    def __repr__(self):
+        return self.value
+
+    @property
+    def value(self):
+        return getattr(self.obj, self.name)
+
+    @value.setter
+    def value(self, value):
+        """Set the value of the attribute
+
+        Arguments:
+            value - the new value that the attribute should take
+
+        Raises:
+            An TypeError is raised if the provided value cannot be cast the
+            current type of the attribute.
+
+        """
+        attr_type = type(self.value)
+        attr_value = value
+
+        # The only way we can currently get the type of the atrribute is if it
+        # has an existing value. So if the attribute has an existing value,
+        # cast the value to the type of the attribute value.
+        if attr_type is not type(None):
+            try:
+                attr_value = attr_type(attr_value)
+
+            except ValueError:
+                msg = "expected type '{}', but got '{}' instead"
+                raise TypeError(msg.format(attr_type.__name__, type(value).__name__))
+
+        setattr(self.obj, self.name, attr_value)
+
+
+class XPathElement(XPathAttribute):
+    """
+    This class is used to represent a reference to an element within a list.
+    Unlike scalar attributes, it is not entirely necessary to have this class
+    to represent the attribute because the element cannot be a simple scalar.
+    However, this class is used because it creates a uniform interface that can
+    be used by the setxattr and getxattr functions.
+    """
+
+    def __init__(self, container, key, value):
+        """Create an instance of XPathElement
+
+        Arguments:
+            container - the object that contains the element
+            key       - the name of the field that is used to identify the
+                        element
+            value     - the value of the key that identifies the element
+
+        """
+        self._container = container
+        self._value = value
+        self._key = key
+
+    @property
+    def value(self):
+        """The element of the container whose key field matches the stored value.
+
+        Raises:
+            A ValueError is raised if no matching element exists.
+
+        """
+        for element in self._container:
+            if getattr(element, self._key) == self._value:
+                return element
+
+        raise ValueError("specified element does not exist")
+
+    @value.setter
+    def value(self, value):
+        # Replace-by-key semantics: drop the existing matching element (if
+        # any), then append the replacement.  NOTE(review): the new element
+        # is appended at the end, so ordering within the container may change.
+        existing = None
+        for element in self._container:
+            if getattr(element, self._key) == self._value:
+                existing = element
+                break
+
+        if existing is not None:
+            self._container.remove(existing)
+
+        self._container.append(value)
+
+
+class XPathSelector(object):
+    """Compiled form of a limited xpath expression (see tokenize()).
+
+    Instances are callable: applying one to a GI object walks the object
+    graph and returns an XPathAttribute (or XPathElement) referencing the
+    addressed attribute.
+    """
+    def __init__(self, xpath):
+        """Creates an instance of XPathSelector
+
+        Arguments:
+            xpath - a string containing an xpath expression
+
+        """
+        self._tokens = tokenize(xpath)
+
+
+    def __call__(self, obj):
+        """Returns a reference to an attribute on the provided object
+
+        Using the defined xpath, an attribute is selected from the provided
+        object and returned.
+
+        Arguments:
+            obj - a GI object
+
+        Raises:
+            A ValueError is raised if the specified element in a list cannot be
+            found.
+
+        Returns:
+            an XPathAttribute that reference the specified attribute
+
+        """
+        current = obj
+        for token in self._tokens[:-1]:
+            # If the object is contained within a list, we will need to iterate
+            # through the tokens until we find a token that is a field of the
+            # object.
+            if token.name not in current.fields:
+                # Leading tokens not present on the root object are skipped;
+                # presumably the xpath is rooted above obj.  The skip is only
+                # permitted while still at the root -- TODO confirm intent.
+                if current is obj:
+                    continue
+
+                raise ValueError('cannot find attribute {}'.format(token.name))
+
+            # If the token is a ListElement, try to find the matching element
+            if isinstance(token, ListElement):
+                for element in getattr(current, token.name):
+                    if getattr(element, token.key) == token.value:
+                        current = element
+                        break
+
+                else:
+                    raise ValueError('unable to find {}'.format(token.value))
+
+            else:
+                # Attribute the variable matching the name of the token
+                current = getattr(current, token.name)
+
+        # Process the final token
+        token = self._tokens[-1]
+
+        # If the token represents a list element, find the element in the list
+        # and return an XPathElement
+        if isinstance(token, ListElement):
+            container = getattr(current, token.name)
+            # for/else: the loop never breaks -- a hit returns directly, so
+            # the else arm runs exactly when no element matched.
+            for element in container:
+                if getattr(element, token.key) == token.value:
+                    return XPathElement(container, token.key, token.value)
+
+            else:
+                raise ValueError('unable to find {}'.format(token.value))
+
+        # Otherwise, return the object as an XPathAttribute
+        return XPathAttribute(current, token.name)
+
+    @property
+    def tokens(self):
+        """The tokens in the xpath expression"""
+        return self._tokens
+
+
+# A global cache to avoid repeated parsing of known xpath expressions
+__xpath_cache = dict()
+
+
+def reset_cache():
+    """Discard all cached XPathSelector instances."""
+    global __xpath_cache
+    __xpath_cache = dict()
+
+
+def getxattr(obj, xpath):
+    """Return an attribute on the provided object
+
+    The xpath is parsed and used to identify an attribute on the provided
+    object. The object is expected to be a GI object where each attribute that
+    is accessible via an xpath expression is contained in the 'fields'
+    attribute of the object (NB: this is not true of GI lists, which do not
+    have a 'fields' attribute).
+
+    A selector is created for each xpath and used to find the specified
+    attribute. The accepted xpath expressions are those supported by the
+    XPathSelector class. The parsed xpath expression is cached so that
+    subsequent parsing is unnecessary. However, selectors are stored in a
+    global dictionary and this means that this function is not thread-safe.
+
+    Arguments:
+        obj   - a GI object
+        xpath - a string containing an xpath expression
+
+    Returns:
+        an attribute on the provided object
+
+    """
+    if xpath not in __xpath_cache:
+        __xpath_cache[xpath] = XPathSelector(xpath)
+
+    return __xpath_cache[xpath](obj).value
+
+
+def setxattr(obj, xpath, value):
+    """Set the attribute referred to by the xpath
+
+    Uses the same global selector cache as getxattr, and therefore shares
+    its caveat: this function is not thread-safe.
+
+    Arguments:
+        obj   - a GI object
+        xpath - a string containing an xpath expression
+        value - the new value of the attribute
+
+    """
+    if xpath not in __xpath_cache:
+        __xpath_cache[xpath] = XPathSelector(xpath)
+
+    __xpath_cache[xpath](obj).value = value
diff --git a/rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py b/rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py
new file mode 100755
index 0000000..3b5c072
--- /dev/null
+++ b/rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwnsmtasklet
+
+class Tasklet(rift.tasklets.rwnsmtasklet.NsmTasklet):
+    """ Plugin entry point: thin alias for NsmTasklet, which is installed in
+    site-packages (RIFT-6485 workaround described in the comment above). """
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwresmgr/CMakeLists.txt b/rwlaunchpad/plugins/rwresmgr/CMakeLists.txt
new file mode 100644
index 0000000..e757e43
--- /dev/null
+++ b/rwlaunchpad/plugins/rwresmgr/CMakeLists.txt
@@ -0,0 +1,41 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+include(rift_plugin)
+
+# Tasklet name; also names the python package subtree installed below.
+set(TASKLET_NAME rwresmgrtasklet)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+# NOTE(review): PKG_LONG_NAME is not defined in this file; it is expected
+# to be inherited from a parent CMakeLists.txt -- confirm.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+    rift/tasklets/${TASKLET_NAME}/rwresmgr_config.py
+    rift/tasklets/${TASKLET_NAME}/rwresmgr_core.py
+    rift/tasklets/${TASKLET_NAME}/rwresmgr_events.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/rwlaunchpad/plugins/rwresmgr/Makefile b/rwlaunchpad/plugins/rwresmgr/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwresmgr/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upwards(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+# NOTE(review): the shell loop runs when the variable is expanded (the ':='
+# assignment below), and the comment name "find_upwards" differs from the
+# actual function name "find_upward".
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+# NOTE(review): if no Makefile.top exists, $(makefile.top) expands empty --
+# confirm a bare 'include' is acceptable in that case.
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py
new file mode 100644
index 0000000..1ee19e3
--- /dev/null
+++ b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py
@@ -0,0 +1 @@
+from .rwresmgrtasklet import ResMgrTasklet
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py
new file mode 100644
index 0000000..5035b18
--- /dev/null
+++ b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py
@@ -0,0 +1,115 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import logging
+import time
+import uuid
+from enum import Enum
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwYang,
+    RwResourceMgrYang,
+    RwLaunchpadYang,
+    RwcalYang,
+)
+
+from gi.repository.RwTypes import RwStatus
+import rift.tasklets
+import rift.mano.cloud
+
+
+class ResourceMgrConfig(object):
+    XPATH_POOL_OPER_DATA = "D,/rw-resource-mgr:resource-pool-records"
+    def __init__(self, dts, log, rwlog_hdl, loop, parent):
+        self._dts = dts
+        self._log = log
+        self._rwlog_hdl = rwlog_hdl
+        self._loop = loop
+        self._parent = parent
+
+        self._cloud_sub = None
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self.register_resource_pool_operational_data()
+        self.register_cloud_account_config()
+
+    def register_cloud_account_config(self):
+        def on_add_cloud_account_apply(account):
+            self._log.debug("Received on_add_cloud_account: %s", account)
+            self._parent.add_cloud_account_config(account)
+
+        def on_delete_cloud_account_apply(account_name):
+            self._log.debug("Received on_delete_cloud_account_apply: %s", account_name)
+            self._parent.delete_cloud_account_config(account_name)
+
+        @asyncio.coroutine
+        def on_delete_cloud_account_prepare(account_name):
+            self._log.debug("Received on_delete_cloud_account_prepare: %s", account_name)
+            self._parent.delete_cloud_account_config(account_name, dry_run=True)
+
+        cloud_callbacks = rift.mano.cloud.CloudAccountConfigCallbacks(
+                on_add_apply=on_add_cloud_account_apply,
+                on_delete_apply=on_delete_cloud_account_apply,
+                on_delete_prepare=on_delete_cloud_account_prepare,
+                )
+
+        self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber(
+                self._dts, self._log, self._rwlog_hdl, cloud_callbacks
+                )
+        self._cloud_sub.register()
+
+    @asyncio.coroutine
+    def register_resource_pool_operational_data(self):
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            self._log.debug("ResourceMgr providing resource-pool information")
+            msg = RwResourceMgrYang.ResourcePoolRecords()
+
+            cloud_accounts = self._parent.get_cloud_account_names()
+            for cloud_account_name in cloud_accounts:
+                pools = self._parent.get_pool_list(cloud_account_name)
+                self._log.debug("Publishing information about cloud account %s %d resource pools",
+                                cloud_account_name, len(pools))
+
+                cloud_account_msg = msg.cloud_account.add()
+                cloud_account_msg.name = cloud_account_name
+                for pool in pools:
+                    pool_info = self._parent.get_pool_info(cloud_account_name, pool.name)
+                    cloud_account_msg.records.append(pool_info)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK,
+                                    ResourceMgrConfig.XPATH_POOL_OPER_DATA,
+                                    msg=msg,)
+
+        self._log.debug("Registering for Resource Mgr resource-pool-record using xpath: %s",
+                        ResourceMgrConfig.XPATH_POOL_OPER_DATA)
+
+        handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+        response = yield from self._dts.register(xpath=ResourceMgrConfig.XPATH_POOL_OPER_DATA,
+                                                 handler=handler,
+                                                 flags=rwdts.Flag.PUBLISHER)
+
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py
new file mode 100644
index 0000000..d2897f8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py
@@ -0,0 +1,1480 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import uuid
+import collections
+import asyncio
+import concurrent.futures
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwYang,
+    RwResourceMgrYang,
+    RwLaunchpadYang,
+    RwcalYang,
+)
+
+from gi.repository.RwTypes import RwStatus
+
+# Exception taxonomy for the resource manager.  Each failure mode gets its
+# own Exception subclass so callers can catch the specific condition.
+class ResMgrCALNotPresent(Exception):
+    pass
+
+class ResMgrCloudAccountNotFound(Exception):
+    pass
+
+class ResMgrCloudAccountExists(Exception):
+    pass
+
+class ResMgrCloudAccountInUse(Exception):
+    pass
+
+class ResMgrDuplicatePool(Exception):
+    pass
+
+class ResMgrPoolNotAvailable(Exception):
+    pass
+
+class ResMgrPoolOperationFailed(Exception):
+    pass
+
+class ResMgrDuplicateEventId(Exception):
+    pass
+
+class ResMgrUnknownEventId(Exception):
+    pass
+
+class ResMgrUnknownResourceId(Exception):
+    pass
+
+class ResMgrResourceIdBusy(Exception):
+    pass
+
+class ResMgrResourceIdNotAllocated(Exception):
+    pass
+
+class ResMgrNoResourcesAvailable(Exception):
+    pass
+
+class ResMgrResourcesInitFailed(Exception):
+    pass
+
+# Raised by ResourceMgrCALHandler methods when an underlying CAL call fails.
+class ResMgrCALOperationFailure(Exception):
+    pass
+
+
+
+class ResourceMgrCALHandler(object):
+    def __init__(self, loop, executor, log, log_hdl, account):
+        """ Wrap one cloud account's CAL (cloud abstraction layer) interface.
+
+        Arguments:
+            loop     - asyncio event loop; blocking CAL calls are dispatched
+                       via loop.run_in_executor()
+            executor - executor the blocking CAL calls run in
+            log      - logger
+            log_hdl  - rwlog handle (not stored by this constructor)
+            account  - account wrapper exposing cal_account_msg and cal
+        """
+        self._log = log
+        self._loop = loop
+        self._executor = executor
+        self._account = account.cal_account_msg
+        self._rwcal = account.cal
+        # Round-robin pool of link subnets handed out by _select_link_subnet();
+        # 'aws' accounts get a dedicated 172.31.x set, everything else the
+        # generic 11.x-22.x set.
+        if account.account_type == 'aws':
+            self._subnets = ["172.31.97.0/24", "172.31.98.0/24", "172.31.99.0/24", "172.31.100.0/24", "172.31.101.0/24"]
+        else:
+            self._subnets = ["11.0.0.0/24",
+                             "12.0.0.0/24",
+                             "13.0.0.0/24",
+                             "14.0.0.0/24",
+                             "15.0.0.0/24",
+                             "16.0.0.0/24",
+                             "17.0.0.0/24",
+                             "18.0.0.0/24",
+                             "19.0.0.0/24",
+                             "20.0.0.0/24",
+                             "21.0.0.0/24",
+                             "22.0.0.0/24",]
+        # Cursor into self._subnets for the round-robin.
+        self._subnet_ptr = 0
+
+    def _select_link_subnet(self):
+        subnet = self._subnets[self._subnet_ptr]
+        self._subnet_ptr += 1
+        if self._subnet_ptr == len(self._subnets):
+            self._subnet_ptr = 0
+        return subnet
+
+    @asyncio.coroutine
+    def create_virtual_network(self, req_params):
+        """ Create (or adopt) a virtual link described by req_params.
+
+        Returns ('precreated', id) when a link with the same name already
+        exists in the VIM; otherwise creates a new link and returns
+        ('dynamic', id).  Raises ResMgrCALOperationFailure when a named vim
+        network is missing or the CAL create call fails.
+        """
+        #rc, rsp = self._rwcal.get_virtual_link_list(self._account)
+        self._log.debug("Calling get_virtual_link_list API")
+        # Blocking CAL call dispatched onto the executor to keep the loop free.
+        rc, rsp = yield from self._loop.run_in_executor(self._executor,
+                                                        self._rwcal.get_virtual_link_list,
+                                                        self._account)
+            
+        assert rc == RwStatus.SUCCESS
+
+        links = [vlink for vlink in rsp.virtual_link_info_list if vlink.name == req_params.name]
+        if links:
+            self._log.debug("Found existing virtual-network with matching name in cloud. Reusing the virtual-network with id: %s" %(links[0].virtual_link_id))
+            return ('precreated', links[0].virtual_link_id)
+        elif req_params.vim_network_name:
+            self._log.error("Virtual-network-allocate operation failed for cloud account: %s Vim Network with name %s does not pre-exist",
+                    self._account.name, req_params.vim_network_name)
+            raise ResMgrCALOperationFailure("Virtual-network allocate operation failed for cloud account: %s Vim Network with name %s does not pre-exist"
+                    %(self._account.name, req_params.vim_network_name))
+
+        params = RwcalYang.VirtualLinkReqParams()
+        params.from_dict(req_params.as_dict())
+        params.subnet = self._select_link_subnet()
+        #rc, rs = self._rwcal.create_virtual_link(self._account, params)
+        self._log.debug("Calling create_virtual_link API with params: %s" %(str(req_params)))
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.create_virtual_link,
+                                                       self._account,
+                                                       params)
+        if rc.status != RwStatus.SUCCESS:
+            self._log.error("Virtual-network-allocate operation failed for cloud account: %s - error_msg: %s, Traceback: %s",
+                    self._account.name, rc.error_msg, rc.traceback)
+            raise ResMgrCALOperationFailure("Virtual-network allocate operation failed for cloud account: %s (%s)"
+                    %(self._account.name, rc.error_msg))
+
+        return ('dynamic',rs)
+
+    @asyncio.coroutine
+    def delete_virtual_network(self, network_id):
+        """ Delete the virtual link with the given id via the CAL.
+
+        Raises ResMgrCALOperationFailure if the CAL delete call fails.
+        """
+        #rc = self._rwcal.delete_virtual_link(self._account, network_id)
+        self._log.debug("Calling delete_virtual_link API with id: %s" %(network_id))
+        rc = yield from self._loop.run_in_executor(self._executor,
+                                                   self._rwcal.delete_virtual_link,
+                                                   self._account,
+                                                   network_id)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Virtual-network-release operation failed for cloud account: %s. ResourceID: %s",
+                            self._account.name,
+                            network_id)
+            raise ResMgrCALOperationFailure("Virtual-network release operation failed for cloud account: %s. ResourceId: %s" %(self._account.name, network_id))
+
+    @asyncio.coroutine        
+    def get_virtual_network_info(self, network_id):
+        """ Return CAL info for the virtual link with the given id.
+
+        Raises ResMgrCALOperationFailure if the CAL query fails.
+        """
+        #rc, rs = self._rwcal.get_virtual_link(self._account, network_id)
+        self._log.debug("Calling get_virtual_link_info API with id: %s" %(network_id))
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.get_virtual_link,
+                                                       self._account,
+                                                       network_id)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Virtual-network-info operation failed for cloud account: %s. ResourceID: %s",
+                            self._account.name,
+                            network_id)
+            raise ResMgrCALOperationFailure("Virtual-network-info operation failed for cloud account: %s. ResourceID: %s" %(self._account.name, network_id))
+        return rs
+
+    @asyncio.coroutine
+    def create_virtual_compute(self, req_params):
+        """ Create (or adopt) a VDU described by req_params; returns the VDU id.
+
+        Reuses an existing VDU with the same name when one is present in the
+        cloud; otherwise resolves the image id and creates a new VDU.
+        Raises ResMgrCALOperationFailure if the CAL create call fails.
+        """
+        #rc, rsp = self._rwcal.get_vdu_list(self._account)
+        self._log.debug("Calling get_vdu_list API")
+        rc, rsp = yield from self._loop.run_in_executor(self._executor,
+                                                        self._rwcal.get_vdu_list,
+                                                        self._account)
+        assert rc == RwStatus.SUCCESS
+        vdus = [vm for vm in rsp.vdu_info_list if vm.name == req_params.name]
+        if vdus:
+            self._log.debug("Found existing virtual-compute with matching name in cloud. Reusing the virtual-compute element with id: %s" %(vdus[0].vdu_id))
+            return vdus[0].vdu_id
+
+        params = RwcalYang.VDUInitParams()
+        params.from_dict(req_params.as_dict())
+
+        # Resolve image name (+ optional checksum) to an image id via a
+        # helper defined elsewhere in this class.
+        image_checksum = req_params.image_checksum if req_params.has_field("image_checksum") else None
+        params.image_id = yield from self.get_image_id_from_image_info(req_params.image_name, image_checksum)
+
+        #rc, rs = self._rwcal.create_vdu(self._account, params)
+        self._log.debug("Calling create_vdu API with params %s" %(str(params)))
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.create_vdu,
+                                                       self._account,
+                                                       params)
+
+        if rc.status != RwStatus.SUCCESS:
+            self._log.error("Virtual-compute-create operation failed for cloud account: %s - error_msg: %s, Traceback: %s",
+                    self._account.name, rc.error_msg, rc.traceback)
+            raise ResMgrCALOperationFailure("Virtual-compute-create operation failed for cloud account: %s (%s)"
+                    %(self._account.name, rc.error_msg))
+
+        return rs
+
+    @asyncio.coroutine
+    def modify_virtual_compute(self, req_params):
+        """ Modify an existing VDU via the CAL; raises ResMgrCALOperationFailure on failure. """
+        #rc = self._rwcal.modify_vdu(self._account, req_params)
+        self._log.debug("Calling modify_vdu API with params: %s" %(str(req_params)))
+        rc = yield from self._loop.run_in_executor(self._executor,
+                                                   self._rwcal.modify_vdu,
+                                                   self._account,
+                                                   req_params)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Virtual-compute-modify operation failed for cloud account: %s", self._account.name)
+            raise ResMgrCALOperationFailure("Virtual-compute-modify operation failed for cloud account: %s" %(self._account.name))
+
+    @asyncio.coroutine        
+    def delete_virtual_compute(self, compute_id):
+        #rc = self._rwcal.delete_vdu(self._account, compute_id)
+        self._log.debug("Calling delete_vdu API with id: %s" %(compute_id))
+        rc = yield from self._loop.run_in_executor(self._executor,
+                                                   self._rwcal.delete_vdu,
+                                                   self._account,
+                                                   compute_id)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Virtual-compute-release operation failed for cloud account: %s. ResourceID: %s",
+                            self._account.name,
+                            compute_id)
+            raise ResMgrCALOperationFailure("Virtual-compute-release operation failed for cloud account: %s. ResourceID: %s" %(self._account.name, compute_id))
+
+    @asyncio.coroutine        
+    def get_virtual_compute_info(self, compute_id):
+        #rc, rs = self._rwcal.get_vdu(self._account, compute_id)
+        self._log.debug("Calling get_vdu API with id: %s" %(compute_id))
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.get_vdu,
+                                                       self._account,
+                                                       compute_id)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Virtual-compute-info operation failed for cloud account: %s. ResourceID: %s",
+                            self._account.name,
+                            compute_id)
+            raise ResMgrCALOperationFailure("Virtual-compute-info operation failed for cloud account: %s. ResourceID: %s" %(self._account.name, compute_id))
+        return rs
+
+    @asyncio.coroutine
+    def get_compute_flavor_info_list(self):
+        #rc, rs = self._rwcal.get_flavor_list(self._account)
+        self._log.debug("Calling get_flavor_list API")
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.get_flavor_list,
+                                                       self._account)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Get-flavor-info-list operation failed for cloud account: %s",
+                            self._account.name)
+            raise ResMgrCALOperationFailure("Get-flavor-info-list operation failed for cloud account: %s" %(self._account.name))
+        return rs.flavorinfo_list
+
+    @asyncio.coroutine
+    def create_compute_flavor(self, request):
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name = str(uuid.uuid4())
+        epa_types = ['vm_flavor', 'guest_epa', 'host_epa', 'host_aggregate']
+        epa_dict = {k: v for k, v in request.as_dict().items() if k in epa_types}
+        flavor.from_dict(epa_dict)
+
+        self._log.info("Creating flavor: %s", flavor)
+        #rc, rs = self._rwcal.create_flavor(self._account, flavor)
+        self._log.debug("Calling create_flavor API")
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.create_flavor,
+                                                       self._account,
+                                                       flavor)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Create-flavor operation failed for cloud account: %s",
+                            self._account.name)
+            raise ResMgrCALOperationFailure("Create-flavor operation failed for cloud account: %s" %(self._account.name))
+        return rs
+
+    @asyncio.coroutine
+    def get_image_info_list(self):
+        #rc, rs = self._rwcal.get_image_list(self._account)
+        self._log.debug("Calling get_image_list API")
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.get_image_list,
+                                                       self._account)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Get-image-info-list operation failed for cloud account: %s",
+                            self._account.name)
+            raise ResMgrCALOperationFailure("Get-image-info-list operation failed for cloud account: %s" %(self._account.name))
+        return rs.imageinfo_list
+
+    @asyncio.coroutine
+    def get_image_id_from_image_info(self, image_name, image_checksum=None):
+        self._log.debug("Looking up image id for image name %s and checksum %s on cloud account: %s",
+                image_name, image_checksum, self._account.name
+                )
+
+        image_list = yield from self.get_image_info_list()
+        matching_images = [i for i in image_list if i.name == image_name]
+
+        # If the image checksum was filled in then further filter the images by the checksum
+        if image_checksum is not None:
+            matching_images = [i for i in matching_images if i.checksum == image_checksum]
+        else:
+            self._log.warning("Image checksum not provided.  Lookup using image name (%s) only.",
+                              image_name)
+
+        if len(matching_images) == 0:
+            raise ResMgrCALOperationFailure("Could not find image name {} (using checksum: {}) for cloud account: {}".format(
+                image_name, image_checksum, self._account.name
+                ))
+
+        elif len(matching_images) > 1:
+            unique_checksums = {i.checksum for i in matching_images}
+            if len(unique_checksums) > 1:
+                msg = ("Too many images with different checksums matched "
+                       "image name of %s for cloud account: %s" % (image_name, self._account.name))
+                raise ResMgrCALOperationFailure(msg)
+
+        return matching_images[0].id
+
+    @asyncio.coroutine
+    def get_image_info(self, image_id):
+        #rc, rs = self._rwcal.get_image(self._account, image_id)
+        self._log.debug("Calling get_image API for id: %s" %(image_id))
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.get_image,
+                                                       self._account,
+                                                       image_id)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Get-image-info-list operation failed for cloud account: %s",
+                            self._account.name)
+            raise ResMgrCALOperationFailure("Get-image-info operation failed for cloud account: %s" %(self._account.name))
+        return rs.imageinfo_list
+
+    def dynamic_flavor_supported(self):
+        return getattr(self._account, self._account.account_type).dynamic_flavor_support
+
+
class Resource(object):
    """Base handle for a pool-tracked CAL resource.

    Wraps the CAL-assigned resource id together with the pool's bookkeeping
    type for the resource (e.g. 'static' or 'dynamic').
    """

    def __init__(self, resource_id, resource_type):
        self._id, self._type = resource_id, resource_type

    @property
    def resource_id(self):
        """CAL-assigned identifier of this resource."""
        return self._id

    @property
    def resource_type(self):
        """Bookkeeping type of this resource (e.g. 'static'/'dynamic')."""
        return self._type

    def cleanup(self):
        """Subclass hook; the base resource has nothing to clean up."""
        pass
+
+
class ComputeResource(Resource):
    """Resource handle for a virtual compute (VDU)."""

    def __init__(self, resource_id, resource_type):
        super(ComputeResource, self).__init__(resource_id, resource_type)
+
+
class NetworkResource(Resource):
    """Resource handle for a virtual link (network)."""

    def __init__(self, resource_id, resource_type):
        super(NetworkResource, self).__init__(resource_id, resource_type)
+
+
class ResourcePoolInfo(object):
    """Plain-data description of a resource pool (name, pool/resource type,
    maximum size)."""

    def __init__(self, name, pool_type, resource_type, max_size):
        self.name = name
        self.pool_type = pool_type
        self.resource_type = resource_type
        self.max_size = max_size

    @classmethod
    def from_dict(cls, pool_dict):
        """Build a ResourcePoolInfo from a dict with keys 'name', 'pool_type',
        'resource_type' and 'max_size'."""
        return cls(pool_dict["name"],
                   pool_dict["pool_type"],
                   pool_dict["resource_type"],
                   pool_dict["max_size"])
+
+
class ResourcePool(object):
    """Bookkeeping for a named pool of CAL resources.

    Maintains three views of pool membership:
        _all_resources       - every resource, keyed by CAL resource-id
        _free_resources      - list of currently unallocated resources
        _allocated_resources - in-use resources, keyed by CAL resource-id

    Subclasses (NetworkPool, ComputePool) supply the CAL-specific hooks:
    allocate_resource_in_cal, release_cal_resource, get_resource_info,
    match_epa_params, match_image_params, initialize_resource_in_cal and
    uninitialize_resource_in_cal.
    """

    def __init__(self, log, loop, pool_info, resource_class, cal):
        """
        Arguments:
            log            - logger instance
            loop           - asyncio event loop
            pool_info      - ResourcePoolInfo-like object (name, pool_type,
                             resource_type, max_size)
            resource_class - Resource subclass used to wrap CAL resource-ids
            cal            - CAL handler used for cloud operations
        """
        self._log = log
        self._loop = loop
        self._name = pool_info.name
        self._pool_type = pool_info.pool_type          # 'static' or 'dynamic'
        self._resource_type = pool_info.resource_type  # 'compute' or 'network'
        self._cal = cal
        self._resource_class = resource_class

        self._max_size = pool_info.max_size

        self._status = 'unlocked'
        ### A Dictionary of all the resources in this pool, keyed by CAL resource-id
        self._all_resources = {}
        ### A List of free resources in this pool
        self._free_resources = []
        ### A Dictionary of all the allocated resources in this pool, keyed by CAL resource-id
        self._allocated_resources = {}

    @property
    def name(self):
        return self._name

    @property
    def cal(self):
        """ This instance's ResourceMgrCALHandler """
        return self._cal

    @property
    def pool_type(self):
        return self._pool_type

    @property
    def resource_type(self):
        return self._resource_type

    @property
    def max_size(self):
        return self._max_size

    @property
    def status(self):
        """'locked' or 'unlocked'."""
        return self._status

    def in_use(self):
        """Return True when at least one resource from this pool is allocated."""
        if len(self._allocated_resources) != 0:
            return True
        else:
            return False

    def update_cal_handler(self, cal):
        """Replace the CAL handler; only legal while no resource is allocated.

        Raises:
            ResMgrPoolOperationFailed - if the pool is in use
        """
        if self.in_use():
            raise ResMgrPoolOperationFailed(
                    "Cannot update CAL plugin for in use pool"
                    )

        self._cal = cal

    def lock_pool(self):
        """Mark the pool locked (no further allocations should be made)."""
        self._log.info("Locking the pool :%s", self.name)
        self._status = 'locked'

    def unlock_pool(self):
        """Mark the pool unlocked."""
        self._log.info("Unlocking the pool :%s", self.name)
        self._status = 'unlocked'

    def add_resource(self, resource_info):
        """Register a statically-provisioned resource as free in this pool."""
        self._log.info("Adding static resource to Pool: %s, Resource-id: %s Resource-Type: %s",
                       self.name,
                       resource_info.resource_id,
                       self.resource_type)

        ### Add static resources to pool
        resource = self._resource_class(resource_info.resource_id, 'static')
        assert resource.resource_id == resource_info.resource_id
        self._all_resources[resource.resource_id] = resource
        self._free_resources.append(resource)

    def delete_resource(self, resource_id):
        """Remove a FREE resource from the pool and run its cleanup.

        Raises:
            ResMgrUnknownResourceId - resource_id not tracked by this pool
            ResMgrResourceIdBusy    - resource is currently allocated
        """
        if resource_id not in self._all_resources:
            self._log.error("Resource Id: %s not present in pool: %s. Delete operation failed", resource_id, self.name)
            raise ResMgrUnknownResourceId("Resource Id: %s requested for release is not found" %(resource_id))

        if resource_id in self._allocated_resources:
            self._log.error("Resource Id: %s in use. Delete operation failed", resource_id)
            raise ResMgrResourceIdBusy("Resource Id: %s requested for release is in use" %(resource_id))

        # BUGFIX: the format string lacked the %s for the resource type while
        # three arguments were supplied, which made this log call mis-format.
        self._log.info("Deleting resource: %s from pool: %s, Resource-Type: %s",
                       resource_id,
                       self.name,
                       self.resource_type)

        resource = self._all_resources.pop(resource_id)
        self._free_resources.remove(resource)
        resource.cleanup()
        del resource

    @asyncio.coroutine
    def read_resource_info(self, resource_id):
        """Return resource info for an ALLOCATED resource of this pool.

        Raises:
            ResMgrUnknownResourceId      - resource_id not tracked by this pool
            ResMgrResourceIdNotAllocated - resource is not currently allocated
        """
        if resource_id not in self._all_resources:
            self._log.error("Resource Id: %s not present in pool: %s. Read operation failed", resource_id, self.name)
            raise ResMgrUnknownResourceId("Resource Id: %s requested for read is not found" %(resource_id))

        if resource_id not in self._allocated_resources:
            self._log.error("Resource Id: %s not in use. Read operation failed", resource_id)
            raise ResMgrResourceIdNotAllocated("Resource Id: %s not in use. Read operation failed" %(resource_id))

        resource = self._allocated_resources[resource_id]
        resource_info = yield from self.get_resource_info(resource)
        return resource_info

    def get_pool_info(self):
        """Build a ResourceRecordInfo summary (counts and status) for this pool."""
        info = RwResourceMgrYang.ResourceRecordInfo()
        self._log.info("Providing info for pool: %s", self.name)
        info.name = self.name
        if self.pool_type:
            info.pool_type = self.pool_type
        if self.resource_type:
            info.resource_type = self.resource_type
        if self.status:
            info.pool_status = self.status

        info.total_resources = len(self._all_resources)
        info.free_resources = len(self._free_resources)
        info.allocated_resources = len(self._allocated_resources)
        return info

    def cleanup(self):
        """Run cleanup on every resource tracked by this pool."""
        for _, v in self._all_resources.items():
            v.cleanup()

    @asyncio.coroutine
    def _allocate_static_resource(self, request, resource_type):
        """Search the free list for a resource whose EPA and image attributes
        satisfy 'request'; move the first match to the allocated set.

        Returns the matched resource, or None when nothing matches.
        """
        unit_type = {'compute': 'VDU', 'network':'VirtualLink'}
        match_found = False
        resource = None
        self._log.info("Doing resource match from pool :%s", self._free_resources)
        for resource in self._free_resources:
            resource_info = yield from self.get_resource_info(resource)
            self._log.info("Attempting to match %s-requirements for %s: %s with resource-id :%s",
                           resource_type, unit_type[resource_type],request.name, resource.resource_id)
            if self.match_epa_params(resource_info, request):
                if self.match_image_params(resource_info, request):
                    match_found = True
                    self._log.info("%s-requirements matched for %s: %s with resource-id :%s",
                                   resource_type, unit_type[resource_type],request.name, resource.resource_id)
                    yield from self.initialize_resource_in_cal(resource, request)
                    # BUGFIX: break only once a match is found. The break was
                    # previously at loop level, so only the first free resource
                    # was ever examined regardless of whether it matched.
                    break

        if not match_found:
            self._log.error("No match found for %s-requirements for %s: %s in pool: %s. %s instantiation failed",
                            resource_type,
                            unit_type[resource_type],
                            request.name,
                            self.name,
                            unit_type[resource_type])
            return None
        else:
            ### Move resource from free-list into allocated-list
            self._log.info("Allocating the static resource with resource-id: %s for %s: %s",
                           resource.resource_id,
                           unit_type[resource_type],request.name)
            self._free_resources.remove(resource)
            self._allocated_resources[resource.resource_id] = resource

        return resource

    @asyncio.coroutine
    def allocate_resource(self, request):
        """Allocate a resource for 'request' and return (resource_id, info)."""
        resource = yield from self.allocate_resource_in_cal(request)
        resource_info = yield from self.get_resource_info(resource)
        return resource.resource_id, resource_info

    @asyncio.coroutine
    def release_resource(self, resource_id):
        """Release an allocated resource back to the pool or the CAL.

        Raises:
            ResMgrUnknownResourceId - resource_id is not currently allocated
        """
        self._log.debug("Releasing resource_id %s in pool %s", resource_id, self.name)
        if resource_id not in self._allocated_resources:
            self._log.error("Failed to release a resource with resource-id: %s in pool: %s. Resource not known",
                            resource_id,
                            self.name)
            raise ResMgrUnknownResourceId("Failed to release resource with resource-id: %s. Unknown resource-id" %(resource_id))

        ### Get resource object
        resource = self._allocated_resources.pop(resource_id)
        yield from self.uninitialize_resource_in_cal(resource)
        yield from self.release_cal_resource(resource)
+
+
class NetworkPool(ResourcePool):
    """Pool of virtual-link (network) resources backed by a CAL account."""

    def __init__(self, log, loop, pool_info, cal):
        super(NetworkPool, self).__init__(log, loop, pool_info, NetworkResource, cal)

    @asyncio.coroutine
    def allocate_resource_in_cal(self, request):
        """Allocate a network resource, preferring static entries; dynamic
        pools fall back to creating a new virtual network in the CAL.

        Raises:
            ResMgrNoResourcesAvailable - nothing in the pool satisfies request
        """
        resource = None
        if self.pool_type == 'static':
            self._log.info("Attempting network resource allocation from static pool: %s", self.name)
            ### Attempt resource allocation from static pool
            resource = yield from self._allocate_static_resource(request, 'network')
        elif self.pool_type == 'dynamic':
            ### Attempt resource allocation from dynamic pool
            self._log.info("Attempting network resource allocation from dynamic pool: %s", self.name)
            if len(self._free_resources) != 0:
                self._log.info("Dynamic pool: %s has %d static resources, Attempting resource allocation from static resources",
                               self.name, len(self._free_resources))
                resource = yield from self._allocate_static_resource(request, 'network')
            if resource is None:
                # BUGFIX: message previously read "Could not resource from ..."
                self._log.info("Could not allocate resource from static resources. Going for dynamic resource allocation")
                ## No static resource available. Attempt dynamic resource from pool
                resource = yield from self.allocate_dynamic_resource(request)
        if resource is None:
            raise ResMgrNoResourcesAvailable("No matching resource available for allocation from pool: %s" %(self.name))
        return resource

    @asyncio.coroutine
    def allocate_dynamic_resource(self, request):
        """Create a new virtual network in the CAL and register it as allocated.

        Raises:
            ResMgrNoResourcesAvailable - the CAL returned an id already in use
        """
        resource_type, resource_id = yield from self._cal.create_virtual_network(request)
        if resource_id in self._all_resources:
            self._log.error("Resource with id %s name %s of type %s is already used", resource_id, request.name, resource_type)
            raise ResMgrNoResourcesAvailable("Resource with name %s of type network is already used" %(resource_id))
        resource = self._resource_class(resource_id, resource_type)
        self._all_resources[resource_id] = resource
        self._allocated_resources[resource_id] = resource
        self._log.info("Successfully allocated virtual-network resource from CAL with resource-id: %s", resource_id)
        return resource

    @asyncio.coroutine
    def release_cal_resource(self, resource):
        """Return a network resource: delete dynamic ones in the CAL, drop
        precreated ones from tracking, and re-free static ones."""
        if resource.resource_type == 'dynamic':
            self._log.debug("Deleting virtual network with network_id: %s", resource.resource_id)
            yield from self._cal.delete_virtual_network(resource.resource_id)
            self._all_resources.pop(resource.resource_id)
            self._log.info("Successfully released virtual-network resource in CAL with resource-id: %s", resource.resource_id)
        elif resource.resource_type == 'precreated':
            self._all_resources.pop(resource.resource_id)
            self._log.info("Successfully removed precreated virtual-network resource from allocated list: %s", resource.resource_id)
        else:
            self._log.info("Successfully released virtual-network resource with resource-id: %s into available-list", resource.resource_id)
            self._free_resources.append(resource)

    @asyncio.coroutine
    def get_resource_info(self, resource):
        """Fetch virtual-link info from the CAL and wrap it as a
        VirtualLinkEventData_ResourceInfo annotated with pool name/state."""
        info = yield from self._cal.get_virtual_network_info(resource.resource_id)
        self._log.info("Successfully retrieved virtual-network information from CAL with resource-id: %s. Info: %s",
                       resource.resource_id, str(info))
        response = RwResourceMgrYang.VirtualLinkEventData_ResourceInfo()
        response.from_dict(info.as_dict())
        response.pool_name = self.name
        response.resource_state = 'active'
        return response

    @asyncio.coroutine
    def get_info_by_id(self, resource_id):
        """Fetch raw virtual-link info from the CAL by resource id."""
        info = yield from self._cal.get_virtual_network_info(resource_id)
        self._log.info("Successfully retrieved virtual-network information from CAL with resource-id: %s. Info: %s",
                       resource_id, str(info))
        return info

    def match_image_params(self, resource_info, request_params):
        """Networks carry no image requirements; always a match."""
        return True

    def match_epa_params(self, resource_info, request_params):
        """Compare requested vs. available provider-network attributes.

        A request without provider_network matches anything; an available
        network without provider_network matches nothing that requested one.
        """
        if not hasattr(request_params, 'provider_network'):
            ### Its a match if nothing is requested
            return True
        else:
            required = getattr(request_params, 'provider_network')

        if not hasattr(resource_info, 'provider_network'):
            ### Its no match
            return False
        else:
            available = getattr(resource_info, 'provider_network')

        self._log.debug("Matching Network EPA params. Required: %s, Available: %s", required, available)

        if required.has_field('name') and required.name != available.name:
            self._log.debug("Provider Network mismatch. Required: %s, Available: %s",
                            required.name,
                            available.name)
            return False

        self._log.debug("Matching EPA params physical network name")

        if required.has_field('physical_network') and required.physical_network != available.physical_network:
            self._log.debug("Physical Network mismatch. Required: %s, Available: %s",
                            required.physical_network,
                            available.physical_network)
            return False

        self._log.debug("Matching EPA params overlay type")
        if required.has_field('overlay_type') and required.overlay_type != available.overlay_type:
            self._log.debug("Overlay type mismatch. Required: %s, Available: %s",
                            required.overlay_type,
                            available.overlay_type)
            return False

        self._log.debug("Matching EPA params SegmentationID")
        if required.has_field('segmentation_id') and required.segmentation_id != available.segmentation_id:
            self._log.debug("Segmentation-Id mismatch. Required: %s, Available: %s",
                            required.segmentation_id,
                            available.segmentation_id)
            return False
        return True

    @asyncio.coroutine
    def initialize_resource_in_cal(self, resource, request):
        """Networks need no per-allocation CAL initialization."""
        pass

    @asyncio.coroutine
    def uninitialize_resource_in_cal(self, resource):
        """Networks need no per-release CAL uninitialization."""
        pass
+
+
class ComputePool(ResourcePool):
    """Pool of virtual-compute (VDU) resources backed by a CAL account."""

    def __init__(self, log, loop, pool_info, cal):
        super().__init__(log, loop, pool_info, ComputeResource, cal)
+
+    @asyncio.coroutine
+    def allocate_resource_in_cal(self, request):
+        resource = None
+        if self.pool_type == 'static':
+            self._log.info("Attempting compute resource allocation from static pool: %s", self.name)
+            ### Attempt resource allocation from static pool
+            resource = yield from self._allocate_static_resource(request, 'compute')
+        elif self.pool_type == 'dynamic':
+            ### Attempt resource allocation from dynamic pool
+            self._log.info("Attempting compute resource allocation from dynamic pool: %s", self.name)
+            if len(self._free_resources) != 0:
+                self._log.info("Dynamic pool: %s has %d static resources, Attempting resource allocation from static resources",
+                               len(self._free_resources),
+                               self.name)
+                resource = yield from self._allocate_static_resource(request, 'compute')
+            if resource is None:
+                self._log.info("Attempting for dynamic resource allocation")
+                resource = yield from self.allocate_dynamic_resource(request)
+        if resource is None:
+            raise ResMgrNoResourcesAvailable("No matching resource available for allocation from pool: %s" %(self.name))
+
+        requested_params = RwcalYang.VDUInitParams()
+        requested_params.from_dict(request.as_dict())
+        resource.requested_params = requested_params
+        return resource
+
+    @asyncio.coroutine
+    def allocate_dynamic_resource(self, request):
+        #request.flavor_id = yield from self.select_resource_flavor(request)
+        resource_id = yield from self._cal.create_virtual_compute(request)
+        resource = self._resource_class(resource_id, 'dynamic')
+        self._all_resources[resource_id] = resource
+        self._allocated_resources[resource_id] = resource
+        self._log.info("Successfully allocated virtual-compute resource from CAL with resource-id: %s", resource_id)
+        return resource
+
+    @asyncio.coroutine
+    def release_cal_resource(self, resource):
+        if hasattr(resource, 'requested_params'):
+            delattr(resource, 'requested_params')
+        if resource.resource_type == 'dynamic':
+            yield from self._cal.delete_virtual_compute(resource.resource_id)
+            self._all_resources.pop(resource.resource_id)
+            self._log.info("Successfully released virtual-compute resource in CAL with resource-id: %s", resource.resource_id)
+        else:
+            self._log.info("Successfully released virtual-compute resource with resource-id: %s into available-list", resource.resource_id)
+            self._free_resources.append(resource)
+
+    @asyncio.coroutine
+    def get_resource_info(self, resource):
+        info = yield from self._cal.get_virtual_compute_info(resource.resource_id)
+        self._log.info("Successfully retrieved virtual-compute information from CAL with resource-id: %s. Info: %s",
+                       resource.resource_id, str(info))
+        response = RwResourceMgrYang.VDUEventData_ResourceInfo()
+        response.from_dict(info.as_dict())
+        response.pool_name = self.name
+        response.resource_state = self._get_resource_state(info, resource.requested_params)
+        return response
+
+    @asyncio.coroutine
+    def get_info_by_id(self, resource_id):
+        info = yield from self._cal.get_virtual_compute_info(resource_id)
+        self._log.info("Successfully retrieved virtual-compute information from CAL with resource-id: %s. Info: %s",
+                       resource_id, str(info))
+        return info 
+
+    def _get_resource_state(self, resource_info, requested_params):
+        if resource_info.state == 'failed':
+            self._log.error("<Compute-Resource: %s> Reached failed state.",
+                            resource_info.name)
+            return 'failed'
+
+        if resource_info.state != 'active':
+            self._log.info("<Compute-Resource: %s> Not reached active state.",
+                           resource_info.name)
+            return 'pending'
+
+        if not resource_info.has_field('management_ip') or resource_info.management_ip == '':
+            self._log.info("<Compute-Resource: %s> Management IP not assigned.",
+                           resource_info.name)
+            return 'pending'
+
+        if (requested_params.has_field('allocate_public_address')) and (requested_params.allocate_public_address == True):
+            if not resource_info.has_field('public_ip'):
+                self._log.warning("<Compute-Resource: %s> Management IP not assigned- waiting for public ip, %s",
+                                  resource_info.name, requested_params)
+                return 'pending'
+
+        if(len(requested_params.connection_points) != 
+           len(resource_info.connection_points)):
+            self._log.warning("<Compute-Resource: %s> Waiting for requested number of ports to be assigned to virtual-compute, requested: %d, assigned: %d",
+                              resource_info.name,
+                              len(requested_params.connection_points),
+                              len(resource_info.connection_points))
+            return 'pending'
+
+        #not_active = [c for c in resource_info.connection_points
+        #              if c.state != 'active']
+
+        #if not_active:
+        #    self._log.warning("<Compute-Resource: %s> Management IP not assigned- waiting for connection_points , %s",
+        #                      resource_info.name, resource_info)
+        #    return 'pending'
+
+        ## Find the connection_points which are in active state but does not have IP address
+        no_address = [c for c in resource_info.connection_points
+                      if (c.state == 'active') and (not c.has_field('ip_address'))]
+
+        if no_address:
+            self._log.warning("<Compute-Resource: %s> Management IP not assigned- waiting for connection_points , %s",
+                              resource_info.name, resource_info)
+            return 'pending'
+
+        return 'active'
+
+    @asyncio.coroutine
+    def select_resource_flavor(self, request):
+        flavors = yield from self._cal.get_compute_flavor_info_list()
+        self._log.debug("Received %d flavor information from RW.CAL", len(flavors))
+        flavor_id = None
+        match_found = False
+        for flv in flavors:
+            self._log.info("Attempting to match compute requirement for VDU: %s with flavor %s",
+                           request.name, flv)
+            if self.match_epa_params(flv, request):
+                self._log.info("Flavor match found for compute requirements for VDU: %s with flavor name: %s, flavor-id: %s",
+                               request.name, flv.name, flv.id)
+                match_found = True
+                flavor_id = flv.id
+                break
+
+        if not match_found:
+            ### Check if CAL account allows dynamic flavor creation
+            if self._cal.dynamic_flavor_supported():
+                self._log.info("Attempting to create a new flavor for required compute-requirement for VDU: %s", request.name)
+                flavor_id = yield from self._cal.create_compute_flavor(request)
+            else:
+                ### No match with existing flavors and CAL does not support dynamic flavor creation
+                self._log.error("Unable to create flavor for compute requirement for VDU: %s. VDU instantiation failed", request.name)
+                raise ResMgrNoResourcesAvailable("No resource available with matching EPA attributes")
+        else:
+            ### Found flavor
+            self._log.info("Found flavor with id: %s for compute requirement for VDU: %s",
+                           flavor_id, request.name)
+        return flavor_id
+
+    def _match_vm_flavor(self, required, available):
+        self._log.info("Matching VM Flavor attributes")
+        if available.vcpu_count != required.vcpu_count:
+            self._log.debug("VCPU requirement mismatch. Required: %d, Available: %d",
+                            required.vcpu_count,
+                            available.vcpu_count)
+            return False
+        if available.memory_mb != required.memory_mb:
+            self._log.debug("Memory requirement mismatch. Required: %d MB, Available: %d MB",
+                            required.memory_mb,
+                            available.memory_mb)
+            return False
+        if available.storage_gb != required.storage_gb:
+            self._log.debug("Storage requirement mismatch. Required: %d GB, Available: %d GB",
+                            required.storage_gb,
+                            available.storage_gb)
+            return False
+        self._log.debug("VM Flavor match found")
+        return True
+
+    def _match_guest_epa(self, required, available):
+        """Compare required guest EPA attributes against an available flavor's.
+
+        Matching is symmetric per field: a field set on `required` must be
+        present and equal on `available`, and a field present on `available`
+        but not required also rejects the flavor (the flavor would be "bigger"
+        than requested). Returns True on a full match, False otherwise.
+        `required`/`available` are GI/protobuf-like objects exposing
+        has_field() — assumed, based on usage; confirm against the yang model.
+        """
+        self._log.info("Matching Guest EPA attributes")
+        # pcie_device: every required (device_id, count) pair must appear in the flavor.
+        if required.has_field('pcie_device'):
+            self._log.debug("Matching pcie_device")
+            if available.has_field('pcie_device') == False:
+                self._log.debug("Matching pcie_device failed. Not available in flavor")
+                return False
+            else:
+                for dev in required.pcie_device:
+                    if not [ d for d in available.pcie_device
+                             if ((d.device_id == dev.device_id) and (d.count == dev.count)) ]:
+                        self._log.debug("Matching pcie_device failed. Required: %s, Available: %s", required.pcie_device, available.pcie_device)
+                        return False
+        elif available.has_field('pcie_device'):
+            self._log.debug("Rejecting available flavor because pcie_device not required but available")
+            return False
+
+
+        # mempage_size: exact equality required when requested.
+        if required.has_field('mempage_size'):
+            self._log.debug("Matching mempage_size")
+            if available.has_field('mempage_size') == False:
+                self._log.debug("Matching mempage_size failed. Not available in flavor")
+                return False
+            else:
+                if required.mempage_size != available.mempage_size:
+                    self._log.debug("Matching mempage_size failed. Required: %s, Available: %s", required.mempage_size, available.mempage_size)
+                    return False
+        elif available.has_field('mempage_size'):
+            self._log.debug("Rejecting available flavor because mempage_size not required but available")
+            return False
+
+        # cpu_pinning_policy: 'ANY' means the requester does not care, so the
+        # check is skipped entirely in that case.
+        if required.has_field('cpu_pinning_policy'):
+            self._log.debug("Matching cpu_pinning_policy")
+            if required.cpu_pinning_policy != 'ANY':
+                if available.has_field('cpu_pinning_policy') == False:
+                    self._log.debug("Matching cpu_pinning_policy failed. Not available in flavor")
+                    return False
+                else:
+                    if required.cpu_pinning_policy != available.cpu_pinning_policy:
+                        self._log.debug("Matching cpu_pinning_policy failed. Required: %s, Available: %s", required.cpu_pinning_policy, available.cpu_pinning_policy)
+                        return False
+        elif available.has_field('cpu_pinning_policy'):
+            self._log.debug("Rejecting available flavor because cpu_pinning_policy not required but available")
+            return False
+
+        # cpu_thread_pinning_policy: exact equality required when requested.
+        if required.has_field('cpu_thread_pinning_policy'):
+            self._log.debug("Matching cpu_thread_pinning_policy")
+            if available.has_field('cpu_thread_pinning_policy') == False:
+                self._log.debug("Matching cpu_thread_pinning_policy failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_thread_pinning_policy != available.cpu_thread_pinning_policy:
+                    self._log.debug("Matching cpu_thread_pinning_policy failed. Required: %s, Available: %s", required.cpu_thread_pinning_policy, available.cpu_thread_pinning_policy)
+                    return False
+        elif available.has_field('cpu_thread_pinning_policy'):
+            self._log.debug("Rejecting available flavor because cpu_thread_pinning_policy not required but available")
+            return False
+
+        # trusted_execution: only enforced when the requester asked for True.
+        if required.has_field('trusted_execution'):
+            self._log.debug("Matching trusted_execution")
+            if required.trusted_execution == True:
+                if available.has_field('trusted_execution') == False:
+                    self._log.debug("Matching trusted_execution failed. Not available in flavor")
+                    return False
+                else:
+                    if required.trusted_execution != available.trusted_execution:
+                        self._log.debug("Matching trusted_execution failed. Required: %s, Available: %s", required.trusted_execution, available.trusted_execution)
+                        return False
+        elif available.has_field('trusted_execution'):
+            self._log.debug("Rejecting available flavor because trusted_execution not required but available")
+            return False
+
+        # numa_node_policy: nested container; node_cnt, mem_policy and the
+        # per-node (id, vcpu, memory_mb) triples are each matched with the same
+        # required/available symmetry as the scalar fields above.
+        if required.has_field('numa_node_policy'):
+            self._log.debug("Matching numa_node_policy")
+            if available.has_field('numa_node_policy') == False:
+                self._log.debug("Matching numa_node_policy failed. Not available in flavor")
+                return False
+            else:
+                if required.numa_node_policy.has_field('node_cnt'):
+                    self._log.debug("Matching numa_node_policy node_cnt")
+                    if available.numa_node_policy.has_field('node_cnt') == False:
+                        self._log.debug("Matching numa_node_policy node_cnt failed. Not available in flavor")
+                        return False
+                    else:
+                        if required.numa_node_policy.node_cnt != available.numa_node_policy.node_cnt:
+                            self._log.debug("Matching numa_node_policy node_cnt failed. Required: %s, Available: %s",required.numa_node_policy.node_cnt, available.numa_node_policy.node_cnt)
+                            return False
+                elif available.numa_node_policy.has_field('node_cnt'):
+                    self._log.debug("Rejecting available flavor because numa node count not required but available")
+                    return False
+
+                if required.numa_node_policy.has_field('mem_policy'):
+                    self._log.debug("Matching numa_node_policy mem_policy")
+                    if available.numa_node_policy.has_field('mem_policy') == False:
+                        self._log.debug("Matching numa_node_policy mem_policy failed. Not available in flavor")
+                        return False
+                    else:
+                        if required.numa_node_policy.mem_policy != available.numa_node_policy.mem_policy:
+                            self._log.debug("Matching numa_node_policy mem_policy failed. Required: %s, Available: %s", required.numa_node_policy.mem_policy, available.numa_node_policy.mem_policy)
+                            return False
+                elif available.numa_node_policy.has_field('mem_policy'):
+                    self._log.debug("Rejecting available flavor because num node mem_policy not required but available")
+                    return False
+
+                if required.numa_node_policy.has_field('node'):
+                    self._log.debug("Matching numa_node_policy nodes configuration")
+                    if available.numa_node_policy.has_field('node') == False:
+                        self._log.debug("Matching numa_node_policy nodes configuration failed. Not available in flavor")
+                        return False
+                    # Each required node must have a counterpart with the same
+                    # id, vcpu list and memory; mismatching candidates are
+                    # logged and skipped (continue), not fatal per-candidate.
+                    for required_node in required.numa_node_policy.node:
+                        self._log.debug("Matching numa_node_policy nodes configuration for node %s", required_node)
+                        numa_match = False
+                        for available_node in available.numa_node_policy.node:
+                            if required_node.id != available_node.id:
+                                self._log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
+                                continue
+                            if required_node.vcpu != available_node.vcpu:
+                                self._log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
+                                continue
+                            if required_node.memory_mb != available_node.memory_mb:
+                                self._log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
+                                continue
+                            numa_match = True
+                        if numa_match == False:
+                            return False
+                elif available.numa_node_policy.has_field('node'):
+                    self._log.debug("Rejecting available flavor because numa nodes not required but available")
+                    return False
+        elif available.has_field('numa_node_policy'):
+            self._log.debug("Rejecting available flavor because numa_node_policy not required but available")
+            return False
+        self._log.info("Successful match for Guest EPA attributes")
+        return True
+
+    def _match_vswitch_epa(self, required, available):
+        self._log.debug("VSwitch EPA match found")
+        return True
+
+    def _match_hypervisor_epa(self, required, available):
+        self._log.debug("Hypervisor EPA match found")
+        return True
+
+    def _match_host_epa(self, required, available):
+        self._log.info("Matching Host EPA attributes")
+        if required.has_field('cpu_model'):
+            self._log.debug("Matching CPU model")
+            if available.has_field('cpu_model') == False:
+                self._log.debug("Matching CPU model failed. Not available in flavor")
+                return False
+            else:
+                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+                if required.cpu_model.replace('PREFER', 'REQUIRE') != available.cpu_model:
+                    self._log.debug("Matching CPU model failed. Required: %s, Available: %s", required.cpu_model, available.cpu_model)
+                    return False
+        elif available.has_field('cpu_model'):
+            self._log.debug("Rejecting available flavor because cpu_model not required but available")
+            return False
+        
+        if required.has_field('cpu_arch'):
+            self._log.debug("Matching CPU architecture")
+            if available.has_field('cpu_arch') == False:
+                self._log.debug("Matching CPU architecture failed. Not available in flavor")
+                return False
+            else:
+                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+                if required.cpu_arch.replace('PREFER', 'REQUIRE') != available.cpu_arch:
+                    self._log.debug("Matching CPU architecture failed. Required: %s, Available: %s", required.cpu_arch, available.cpu_arch)
+                    return False
+        elif available.has_field('cpu_arch'):
+            self._log.debug("Rejecting available flavor because cpu_arch not required but available")
+            return False
+        
+        if required.has_field('cpu_vendor'):
+            self._log.debug("Matching CPU vendor")
+            if available.has_field('cpu_vendor') == False:
+                self._log.debug("Matching CPU vendor failed. Not available in flavor")
+                return False
+            else:
+                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+                if required.cpu_vendor.replace('PREFER', 'REQUIRE') != available.cpu_vendor:
+                    self._log.debug("Matching CPU vendor failed. Required: %s, Available: %s", required.cpu_vendor, available.cpu_vendor)
+                    return False
+        elif available.has_field('cpu_vendor'):
+            self._log.debug("Rejecting available flavor because cpu_vendor not required but available")
+            return False
+
+        if required.has_field('cpu_socket_count'):
+            self._log.debug("Matching CPU socket count")
+            if available.has_field('cpu_socket_count') == False:
+                self._log.debug("Matching CPU socket count failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_socket_count != available.cpu_socket_count:
+                    self._log.debug("Matching CPU socket count failed. Required: %s, Available: %s", required.cpu_socket_count, available.cpu_socket_count)
+                    return False
+        elif available.has_field('cpu_socket_count'):
+            self._log.debug("Rejecting available flavor because cpu_socket_count not required but available")
+            return False
+        
+        if required.has_field('cpu_core_count'):
+            self._log.debug("Matching CPU core count")
+            if available.has_field('cpu_core_count') == False:
+                self._log.debug("Matching CPU core count failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_core_count != available.cpu_core_count:
+                    self._log.debug("Matching CPU core count failed. Required: %s, Available: %s", required.cpu_core_count, available.cpu_core_count)
+                    return False
+        elif available.has_field('cpu_core_count'):
+            self._log.debug("Rejecting available flavor because cpu_core_count not required but available")
+            return False
+        
+        if required.has_field('cpu_core_thread_count'):
+            self._log.debug("Matching CPU core thread count")
+            if available.has_field('cpu_core_thread_count') == False:
+                self._log.debug("Matching CPU core thread count failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_core_thread_count != available.cpu_core_thread_count:
+                    self._log.debug("Matching CPU core thread count failed. Required: %s, Available: %s", required.cpu_core_thread_count, available.cpu_core_thread_count)
+                    return False
+        elif available.has_field('cpu_core_thread_count'):
+            self._log.debug("Rejecting available flavor because cpu_core_thread_count not required but available")
+            return False
+    
+        if required.has_field('cpu_feature'):
+            self._log.debug("Matching CPU feature list")
+            if available.has_field('cpu_feature') == False:
+                self._log.debug("Matching CPU feature list failed. Not available in flavor")
+                return False
+            else:
+                for feature in required.cpu_feature:
+                    if feature not in available.cpu_feature:
+                        self._log.debug("Matching CPU feature list failed. Required feature: %s is not present. Available features: %s", feature, available.cpu_feature)
+                        return False
+        elif available.has_field('cpu_feature'):
+            self._log.debug("Rejecting available flavor because cpu_feature not required but available")
+            return False
+        self._log.info("Successful match for Host EPA attributes")            
+        return True
+
+
+    def _match_placement_group_inputs(self, required, available):
+        self._log.info("Matching Host aggregate attributes")
+        
+        if not required and not available:
+            # Host aggregate not required and not available => success
+            self._log.info("Successful match for Host Aggregate attributes")
+            return True
+        if required and available:
+            # Host aggregate requested and available => Do a match and decide
+            xx = [ x.as_dict() for x in required ]
+            yy = [ y.as_dict() for y in available ]
+            for i in xx:
+                if i not in yy:
+                    self._log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
+                    return False
+            self._log.info("Successful match for Host Aggregate attributes")
+            return True
+        else:
+            # Either of following conditions => Failure
+            #  - Host aggregate required but not available
+            #  - Host aggregate not required but available
+            self._log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
+            return False
+                    
+    
+    def match_image_params(self, resource_info, request_params):
+        return True
+
+    def match_epa_params(self, resource_info, request_params):
+        result = self._match_vm_flavor(getattr(request_params, 'vm_flavor'),
+                                       getattr(resource_info, 'vm_flavor'))
+        if result == False:
+            self._log.debug("VM Flavor mismatched")
+            return False
+
+        result = self._match_guest_epa(getattr(request_params, 'guest_epa'),
+                                       getattr(resource_info, 'guest_epa'))
+        if result == False:
+            self._log.debug("Guest EPA mismatched")
+            return False
+
+        result = self._match_vswitch_epa(getattr(request_params, 'vswitch_epa'),
+                                         getattr(resource_info, 'vswitch_epa'))
+        if result == False:
+            self._log.debug("Vswitch EPA mismatched")
+            return False
+
+        result = self._match_hypervisor_epa(getattr(request_params, 'hypervisor_epa'),
+                                            getattr(resource_info, 'hypervisor_epa'))
+        if result == False:
+            self._log.debug("Hypervisor EPA mismatched")
+            return False
+
+        result = self._match_host_epa(getattr(request_params, 'host_epa'),
+                                      getattr(resource_info, 'host_epa'))
+        if result == False:
+            self._log.debug("Host EPA mismatched")
+            return False
+
+        result = self._match_placement_group_inputs(getattr(request_params, 'host_aggregate'),
+                                                    getattr(resource_info, 'host_aggregate'))
+
+        if result == False:
+            self._log.debug("Host Aggregate mismatched")
+            return False
+        
+        return True
+
+    @asyncio.coroutine
+    def initialize_resource_in_cal(self, resource, request):
+        """Attach the requested image and connection points to an existing VDU.
+
+        Builds a VDUModifyParams for resource.resource_id and issues a single
+        modify_virtual_compute call to the CAL.
+        """
+        self._log.info("Initializing the compute-resource with id: %s in RW.CAL", resource.resource_id)
+        modify_params = RwcalYang.VDUModifyParams()
+        modify_params.vdu_id = resource.resource_id
+        modify_params.image_id = request.image_id
+
+        # Add one connection point per requested point, keyed to its virtual link.
+        for c_point in request.connection_points:
+            self._log.debug("Adding connection point for VDU: %s to virtual-compute with id: %s  Connection point Name: %s",
+                            request.name,resource.resource_id,c_point.name)
+            point = modify_params.connection_points_add.add()
+            point.name = c_point.name
+            point.virtual_link_id = c_point.virtual_link_id
+        yield from self._cal.modify_virtual_compute(modify_params)
+
+    @asyncio.coroutine
+    def uninitialize_resource_in_cal(self, resource):
+        """Detach all connection points from a VDU prior to releasing it.
+
+        Reads the VDU's current state to enumerate its connection points, then
+        issues one modify_virtual_compute call removing each of them.
+        """
+        self._log.info("Un-initializing the compute-resource with id: %s in RW.CAL", resource.resource_id)
+        modify_params = RwcalYang.VDUModifyParams()
+        modify_params.vdu_id = resource.resource_id
+        resource_info =  yield from self.get_resource_info(resource)
+        for c_point in resource_info.connection_points:
+            self._log.debug("Removing connection point: %s from VDU: %s ",
+                            c_point.name,resource_info.name)
+            point = modify_params.connection_points_remove.add()
+            point.connection_point_id = c_point.connection_point_id
+        yield from self._cal.modify_virtual_compute(modify_params)
+
+
+class ResourceMgrCore(object):
+    def __init__(self, dts, log, log_hdl, loop, parent):
+        self._log = log
+        self._log_hdl = log_hdl
+        self._dts = dts
+        self._loop = loop
+        self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
+        self._parent = parent
+        self._cloud_cals = {}
+        # Dictionary of pool objects keyed by name
+        self._cloud_pool_table = {}
+        # Dictionary of tuples (resource_id, cloud_account_name, pool_name) keyed by event_id
+        self._resource_table = {}
+        self._pool_class = {'compute': ComputePool,
+                            'network': NetworkPool}
+
+    def _get_cloud_pool_table(self, cloud_account_name):
+        if cloud_account_name not in self._cloud_pool_table:
+            msg = "Cloud account %s not found" % cloud_account_name
+            self._log.error(msg)
+            raise ResMgrCloudAccountNotFound(msg)
+
+        return self._cloud_pool_table[cloud_account_name]
+
+    def _get_cloud_cal_plugin(self, cloud_account_name):
+        if cloud_account_name not in self._cloud_cals:
+            msg = "Cloud account %s not found" % cloud_account_name
+            self._log.error(msg)
+            raise ResMgrCloudAccountNotFound(msg)
+
+        return self._cloud_cals[cloud_account_name]
+
+    def _add_default_cloud_pools(self, cloud_account_name):
+        self._log.debug("Adding default compute and network pools for cloud account %s",
+                        cloud_account_name)
+        default_pools = [
+                    {
+                        'name': '____default_compute_pool',
+                        'resource_type': 'compute',
+                        'pool_type': 'dynamic',
+                        'max_size': 128,
+                    },
+                    {
+                        'name': '____default_network_pool',
+                        'resource_type': 'network',
+                        'pool_type': 'dynamic',
+                        'max_size': 128,
+                    },
+                ]
+
+        for pool_dict in default_pools:
+            pool_info = ResourcePoolInfo.from_dict(pool_dict)
+            self._log.info("Applying configuration for cloud account %s pool: %s",
+                           cloud_account_name, pool_info.name)
+
+            self.add_resource_pool(cloud_account_name, pool_info)
+            self.unlock_resource_pool(cloud_account_name, pool_info.name)
+
+    def get_cloud_account_names(self):
+        """ Returns a list of configured cloud account names """
+        return self._cloud_cals.keys()
+
+    def add_cloud_account(self, account):
+        self._log.debug("Received CAL account. Account Name: %s, Account Type: %s",
+                        account.name, account.account_type)
+
+        ### Add cal handler to all the pools
+        if account.name in self._cloud_cals:
+            raise ResMgrCloudAccountExists("Cloud account already exists in res mgr: %s",
+                                           account.name)
+
+        self._cloud_pool_table[account.name] = {}
+
+        cal = ResourceMgrCALHandler(self._loop, self._executor, self._log, self._log_hdl, account)
+        self._cloud_cals[account.name] = cal
+
+        self._add_default_cloud_pools(account.name)
+
+    def update_cloud_account(self, account):
+        raise NotImplementedError("Update cloud account not implemented")
+
+    def delete_cloud_account(self, account_name, dry_run=False):
+        cloud_pool_table = self._get_cloud_pool_table(account_name)
+        for pool in cloud_pool_table.values():
+            if pool.in_use():
+                raise ResMgrCloudAccountInUse("Cannot delete cloud which is currently in use")
+
+        # If dry_run is specified, do not actually delete the cloud account
+        if dry_run:
+            return
+
+        for pool in list(cloud_pool_table):
+            self.delete_resource_pool(account_name, pool)
+
+        del self._cloud_pool_table[account_name]
+        del self._cloud_cals[account_name]
+
+    def add_resource_pool(self, cloud_account_name, pool_info):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_info.name in cloud_pool_table:
+            raise ResMgrDuplicatePool("Pool with name: %s already exists", pool_info.name)
+
+        cloud_cal = self._get_cloud_cal_plugin(cloud_account_name)
+        pool = self._pool_class[pool_info.resource_type](self._log, self._loop, pool_info, cloud_cal)
+
+        cloud_pool_table[pool_info.name] = pool
+
+    def delete_resource_pool(self, cloud_account_name, pool_name):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_name not in cloud_pool_table:
+            self._log.error("Pool: %s not found for deletion", pool_name)
+            return
+        pool = cloud_pool_table[pool_name]
+
+        if pool.in_use():
+            # Can't delete a pool in use
+            self._log.error("Pool: %s in use. Can not delete in-use pool", pool.name)
+            return
+
+        pool.cleanup()
+        del cloud_pool_table[pool_name]
+        self._log.info("Resource Pool: %s successfully deleted", pool_name)
+
+    def modify_resource_pool(self, cloud_account_name, pool):
+        pass
+
+    def lock_resource_pool(self, cloud_account_name, pool_name):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_name not in cloud_pool_table:
+            self._log.info("Pool: %s is not available for lock operation")
+            return
+
+        pool = cloud_pool_table[pool_name]
+        pool.lock_pool()
+
+    def unlock_resource_pool(self, cloud_account_name, pool_name):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_name not in cloud_pool_table:
+            self._log.info("Pool: %s is not available for unlock operation")
+            return
+
+        pool = cloud_pool_table[pool_name]
+        pool.unlock_pool()
+
+    def get_resource_pool_info(self, cloud_account_name, pool_name):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_name in cloud_pool_table:
+            pool = cloud_pool_table[pool_name]
+            return pool.get_pool_info()
+        else:
+            return None
+
+    def get_resource_pool_list(self, cloud_account_name):
+        return [v for _, v in self._get_cloud_pool_table(cloud_account_name).items()]
+
+    def _select_resource_pools(self, cloud_account_name, resource_type):
+        pools = [pool for pool in self.get_resource_pool_list(cloud_account_name) if pool.resource_type == resource_type and pool.status == 'unlocked']
+        if not pools:
+            raise ResMgrPoolNotAvailable("No %s pool found for resource allocation", resource_type)
+
+        return pools[0]
+
+    @asyncio.coroutine
+    def allocate_virtual_resource(self, event_id, cloud_account_name, request, resource_type):
+        ### Check if event_id is unique or already in use
+        if event_id in self._resource_table:
+            r_id, cloud_account_name, pool_name = self._resource_table[event_id]
+            self._log.warning("Requested event-id :%s for resource-allocation already active with pool: %s",
+                              event_id, pool_name)
+            # If resource-type matches then return the same resource
+            cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+            pool = cloud_pool_table[pool_name]
+            if pool.resource_type == resource_type:
+
+                info = yield from pool.read_resource_info(r_id)
+                return info
+            else:
+                self._log.error("Event-id conflict. Duplicate event-id: %s", event_id)
+                raise ResMgrDuplicateEventId("Requested event-id :%s already active with pool: %s" %(event_id, pool_name))
+
+        ### All-OK, lets go ahead with resource allocation
+        pool = self._select_resource_pools(cloud_account_name, resource_type)
+        self._log.info("Selected pool %s for resource allocation", pool.name)
+
+        r_id, r_info = yield from pool.allocate_resource(request)
+
+        self._resource_table[event_id] = (r_id, cloud_account_name, pool.name)
+        return r_info
+
+    @asyncio.coroutine
+    def reallocate_virtual_resource(self, event_id, cloud_account_name, request, resource_type, resource):
+        ### Check if event_id is unique or already in use
+        if event_id in self._resource_table:
+            r_id, cloud_account_name, pool_name = self._resource_table[event_id]
+            self._log.warning("Requested event-id :%s for resource-allocation already active with pool: %s",
+                              event_id, pool_name)
+            # If resource-type matches then return the same resource
+            cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+            pool = cloud_pool_table[pool_name]
+            if pool.resource_type == resource_type:
+                info = yield from pool.read_resource_info(r_id)
+                return info
+            else:
+                self._log.error("Event-id conflict. Duplicate event-id: %s", event_id)
+                raise ResMgrDuplicateEventId("Requested event-id :%s already active with pool: %s" %(event_id, pool_name))
+
+        r_info = None
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        pool = cloud_pool_table[resource.pool_name]
+        if pool.resource_type == resource_type:
+            if resource_type == 'network':
+              r_id = resource.virtual_link_id
+              r_info = yield from pool.get_info_by_id(resource.virtual_link_id)
+            elif resource_type == 'compute':
+              r_id = resource.vdu_id
+              r_info = yield from pool.get_info_by_id(resource.vdu_id)
+
+        if r_info is None:
+            r_id, r_info = yield from pool.allocate_resource(request)
+            self._resource_table[event_id] = (r_id, cloud_account_name, resource.pool_name)
+            return r_info
+
+        self._resource_table[event_id] = (r_id, cloud_account_name, resource.pool_name)
+        new_resource = pool._resource_class(r_id, 'dynamic')
+        if resource_type == 'compute':
+            requested_params = RwcalYang.VDUInitParams()
+            requested_params.from_dict(request.as_dict())
+            new_resource.requested_params = requested_params
+        pool._all_resources[r_id] = new_resource
+        pool._allocated_resources[r_id] = new_resource
+        return r_info
+
+    @asyncio.coroutine
+    def release_virtual_resource(self, event_id, resource_type):
+        ### Check if event_id exists
+        if event_id not in self._resource_table:
+            self._log.error("Received resource-release-request with unknown Event-id :%s", event_id)
+            raise ResMgrUnknownEventId("Received resource-release-request with unknown Event-id :%s" %(event_id))
+
+        ## All-OK, lets proceed with resource release
+        r_id, cloud_account_name, pool_name = self._resource_table.pop(event_id)
+        self._log.debug("Attempting to release virtual resource id %s from pool %s",
+                        r_id, pool_name)
+
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        pool = cloud_pool_table[pool_name]
+        yield from pool.release_resource(r_id)
+
+    @asyncio.coroutine
+    def read_virtual_resource(self, event_id, resource_type):
+        ### Check if event_id exists
+        if event_id not in self._resource_table:
+            self._log.error("Received resource-read-request with unknown Event-id :%s", event_id)
+            raise ResMgrUnknownEventId("Received resource-read-request with unknown Event-id :%s" %(event_id))
+
+        ## All-OK, lets proceed
+        r_id, cloud_account_name, pool_name = self._resource_table[event_id]
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        pool = cloud_pool_table[pool_name]
+        info = yield from pool.read_resource_info(r_id)
+        return info
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py
new file mode 100755
index 0000000..5f87c66
--- /dev/null
+++ b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py
@@ -0,0 +1,314 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import sys
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwYang,
+    RwResourceMgrYang,
+    RwLaunchpadYang,
+    RwcalYang,
+)
+
+from gi.repository.RwTypes import RwStatus
+import rift.tasklets
+
+# asyncio.ensure_future() first appeared in Python 3.4.4; alias the old
+# name on older interpreters.  NOTE(review): "async" became a reserved
+# keyword in Python 3.7, so the attribute access below is a SyntaxError
+# there -- this shim only parses on interpreters old enough to need it.
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class ResourceMgrEvent(object):
+    """DTS event handler for resource-manager allocation requests.
+
+    Registers as publisher/datastore on the vlink and vdu event xpaths,
+    services create/read/delete prepares by delegating to the parent
+    ResourceManager, and re-runs allocation for previously published
+    elements after a restart (on the INSTALL member event).
+    """
+
+    # Keyspec xpaths this handler registers under.
+    # NOTE(review): the "D," prefix presumably selects the DTS data store
+    # keyspec category -- confirm against the DTS documentation.
+    VDU_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data"
+    VLINK_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
+
+    def __init__(self, dts, log, loop, parent):
+        """Store handles; DTS registrations are created later in register()."""
+        self._log = log
+        self._dts = dts
+        self._loop = loop
+        self._parent = parent
+        # Registration handles, filled in by register(); on_request_ready()
+        # compares against these to decide which event to set.
+        self._vdu_reg = None
+        self._link_reg = None
+
+        self._vdu_reg_event = asyncio.Event(loop=self._loop)
+        self._link_reg_event = asyncio.Event(loop=self._loop)
+
+    @asyncio.coroutine
+    def wait_ready(self, timeout=5):
+        """Wait (up to timeout seconds) for both registrations to become ready.
+
+        NOTE: asyncio.wait() does not raise on timeout, so callers are not
+        notified if the registrations never become ready within the window.
+        """
+        self._log.debug("Waiting for all request registrations to become ready.")
+        yield from asyncio.wait([self._link_reg_event.wait(), self._vdu_reg_event.wait()],
+                                timeout=timeout, loop=self._loop)
+
+    def create_record_dts(self, regh, xact, path, msg):
+        """
+        Create a record in DTS with path and message
+        (xact is only logged; the element is created outside a transaction)
+        """
+        self._log.debug("Creating Resource Record xact = %s, %s:%s",
+                        xact, path, msg)
+        regh.create_element(path, msg)
+
+    def delete_record_dts(self, regh, xact, path):
+        """
+        Delete a VNFR record in DTS with path and message
+        (xact is only logged; the element is deleted outside a transaction)
+        """
+        self._log.debug("Deleting Resource Record xact = %s, %s",
+                        xact, path)
+        regh.delete_element(path)
+
+    @asyncio.coroutine
+    def register(self):
+        """Create DTS groups and registrations for the vlink and vdu xpaths.
+
+        All request handling logic lives in the closures below; the group
+        on_event handlers implement restart recovery by re-allocating every
+        element already published under the registration.
+        """
+        @asyncio.coroutine
+        def onlink_event(dts, g_reg, xact, xact_event, scratch_data):
+            @asyncio.coroutine
+            def instantiate_realloc_vn(link):
+                """Re-allocate one virtual link after a restart.
+
+                Arguments:
+                    link - a previously published vlink-event-data element
+                """
+                # wait for 3 seconds
+                yield from asyncio.sleep(3, loop=self._loop)
+
+                # NOTE(review): response_info is assigned but never used;
+                # presumably reallocate publishes its own state -- confirm.
+                response_info = yield from self._parent.reallocate_virtual_network(link.event_id,
+                                                                                 link.cloud_account,
+                                                                                 link.request_info, link.resource_info,
+                                                                                 )
+            # On INSTALL, re-run allocation for every link element already
+            # published under this registration (restart recovery).
+            if (xact_event == rwdts.MemberEvent.INSTALL):
+              link_cfg = self._link_reg.elements
+              for link in link_cfg:
+                self._loop.create_task(instantiate_realloc_vn(link))
+            return rwdts.MemberRspCode.ACTION_OK
+
+        @asyncio.coroutine
+        def onvdu_event(dts, g_reg, xact, xact_event, scratch_data):
+            @asyncio.coroutine
+            def instantiate_realloc_vdu(vdu):
+                """Re-allocate one VDU after a restart.
+
+                Arguments:
+                    vdu - a previously published vdu-event-data element
+                """
+                # wait for 3 seconds
+                yield from asyncio.sleep(3, loop=self._loop)
+
+                # NOTE(review): unlike the vlink path, this calls allocate
+                # (not reallocate) and drops the prior resource_info --
+                # confirm this asymmetry is intentional.
+                response_info = yield from self._parent.allocate_virtual_compute(vdu.event_id,
+                                                                                 vdu.cloud_account,
+                                                                                 vdu.request_info
+                                                                                 )
+            # On INSTALL, re-run allocation for every VDU element already
+            # published under this registration (restart recovery).
+            if (xact_event == rwdts.MemberEvent.INSTALL):
+              vdu_cfg = self._vdu_reg.elements
+              for vdu in vdu_cfg:
+                self._loop.create_task(instantiate_realloc_vdu(vdu))
+            return rwdts.MemberRspCode.ACTION_OK
+
+        def on_link_request_commit(xact_info):
+            """ The transaction has been committed """
+            self._log.debug("Received link request commit (xact_info: %s)", xact_info)
+            return rwdts.MemberRspCode.ACTION_OK
+
+        @asyncio.coroutine
+        def on_link_request_prepare(xact_info, action, ks_path, request_msg):
+            """Service a create/read/delete prepare on the vlink xpath,
+            responding with the resource-info for the requested link."""
+            self._log.debug("Received virtual-link on_prepare callback (xact_info: %s, action: %s): %s",
+                            xact_info, action, request_msg)
+
+            response_info = None
+            response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+
+            # Extract the event-id key from the incoming keyspec.
+            schema = RwResourceMgrYang.VirtualLinkEventData().schema()
+            pathentry = schema.keyspec_to_entry(ks_path)
+
+            if action == rwdts.QueryAction.CREATE:
+                try:
+                    response_info = yield from self._parent.allocate_virtual_network(pathentry.key00.event_id,
+                                                                                 request_msg.cloud_account,
+                                                                                 request_msg.request_info)
+                except Exception as e:
+                    # Allocation failed: publish a 'failed' resource state so
+                    # the requester sees the error via the data store.
+                    self._log.error("Encountered exception: %s while creating virtual network", str(e))
+                    self._log.exception(e)
+                    response_info = RwResourceMgrYang.VirtualLinkEventData_ResourceInfo()
+                    response_info.resource_state = 'failed'
+                    response_info.resource_errors = str(e)
+                    yield from self._dts.query_update(response_xpath,
+                                                      rwdts.XactFlag.ADVISE,
+                                                      response_info)
+                else:
+                    # Success: record the request (with its resource info)
+                    # so it can be re-allocated after a restart.
+                    request_msg.resource_info = response_info
+                    self.create_record_dts(self._link_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()), request_msg)
+            elif action == rwdts.QueryAction.DELETE:
+                yield from self._parent.release_virtual_network(pathentry.key00.event_id)
+                self.delete_record_dts(self._link_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()))
+            elif action == rwdts.QueryAction.READ:
+                response_info = yield from self._parent.read_virtual_network_info(pathentry.key00.event_id)
+            else:
+                raise ValueError("Only read/create/delete actions available. Received action: %s" %(action))
+
+            self._log.debug("Responding with VirtualLinkInfo at xpath %s: %s.",
+                            response_xpath, response_info)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info)
+
+
+        def on_vdu_request_commit(xact_info):
+            """ The transaction has been committed """
+            self._log.debug("Received vdu request commit (xact_info: %s)", xact_info)
+            return rwdts.MemberRspCode.ACTION_OK
+
+        def monitor_vdu_state(response_xpath, pathentry):
+            """Poll the VDU once per second (up to 180s) until it reaches a
+            terminal state ('active' or 'failed'), then publish the result.
+
+            NOTE(review): this is a generator used as a coroutine without the
+            @asyncio.coroutine decorator; asyncio accepts bare generators on
+            older interpreters -- confirm on the target Python version.
+            """
+            self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath)
+            loop_cnt = 180
+            for i in range(loop_cnt):
+                self._log.debug("VDU state monitoring for xpath: %s. Sleeping for 1 second", response_xpath)
+                yield from asyncio.sleep(1, loop = self._loop)
+                try:
+                    response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
+                except Exception as e:
+                    # Read failure is terminal: publish 'failed' and stop.
+                    self._log.info("VDU state monitoring: Received exception %s in VDU state monitoring for %s. Aborting monitoring",
+                                   str(e),response_xpath)
+                    response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+                    response_info.resource_state = 'failed'
+                    response_info.resource_errors = str(e)
+                    yield from self._dts.query_update(response_xpath,
+                                                      rwdts.XactFlag.ADVISE,
+                                                      response_info)
+                else:
+                    if response_info.resource_state == 'active' or response_info.resource_state == 'failed':
+                        self._log.info("VDU state monitoring: VDU reached terminal state. Publishing VDU info: %s at path: %s",
+                                       response_info, response_xpath)
+                        yield from self._dts.query_update(response_xpath,
+                                                          rwdts.XactFlag.ADVISE,
+                                                          response_info)
+                        return
+            else:
+                # for/else: this branch runs only if the loop completed all
+                # iterations without hitting the 'return' above.
+                ### End of loop. This is only possible if VDU did not reach active state
+                err_msg = "VDU state monitoring: VDU at xpath :{} did not reached active state in {} seconds. Aborting monitoring".format(response_xpath, loop_cnt)
+                self._log.info(err_msg)
+                response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+                response_info.resource_state = 'failed'
+                response_info.resource_errors = err_msg
+                yield from self._dts.query_update(response_xpath,
+                                                  rwdts.XactFlag.ADVISE,
+                                                  response_info)
+            return
+
+        def allocate_vdu_task(ks_path, event_id, cloud_account, request_msg):
+            """Background task: allocate the VDU, publish a terminal state
+            immediately if available, otherwise hand off to monitor_vdu_state.
+
+            NOTE(review): bare generator coroutine, same caveat as
+            monitor_vdu_state above.
+            """
+            response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+            schema = RwResourceMgrYang.VDUEventData().schema()
+            pathentry = schema.keyspec_to_entry(ks_path)
+            try:
+                response_info = yield from self._parent.allocate_virtual_compute(event_id,
+                                                                                 cloud_account,
+                                                                                 request_msg,)
+            except Exception as e:
+                self._log.error("Encountered exception : %s while creating virtual compute", str(e))
+                response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+                response_info.resource_state = 'failed'
+                response_info.resource_errors = str(e)
+                yield from self._dts.query_update(response_xpath,
+                                                  rwdts.XactFlag.ADVISE,
+                                                  response_info)
+            else:
+                if response_info.resource_state == 'failed' or response_info.resource_state == 'active' :
+                    self._log.info("Virtual compute create task completed. Publishing VDU info: %s at path: %s",
+                                   response_info, response_xpath)
+                    yield from self._dts.query_update(response_xpath,
+                                                      rwdts.XactFlag.ADVISE,
+                                                      response_info)
+                else:
+                    # Non-terminal state: poll in the background until it
+                    # becomes 'active'/'failed' or times out.
+                    asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry),
+                                          loop = self._loop)
+
+
+        @asyncio.coroutine
+        def on_vdu_request_prepare(xact_info, action, ks_path, request_msg):
+            """Service a create/read/delete prepare on the vdu xpath.
+
+            CREATE is answered immediately with a 'pending' resource state;
+            the actual allocation runs in allocate_vdu_task in the background.
+            """
+            self._log.debug("Received vdu on_prepare callback (xact_info: %s, action: %s): %s",
+                            xact_info, action, request_msg)
+            response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+            schema = RwResourceMgrYang.VDUEventData().schema()
+            pathentry = schema.keyspec_to_entry(ks_path)
+
+            if action == rwdts.QueryAction.CREATE:
+                response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+                response_info.resource_state = 'pending'
+                request_msg.resource_info = response_info
+                self.create_record_dts(self._vdu_reg,
+                                       None,
+                                       ks_path.to_xpath(RwResourceMgrYang.get_schema()),
+                                       request_msg)
+                asyncio.ensure_future(allocate_vdu_task(ks_path,
+                                                        pathentry.key00.event_id,
+                                                        request_msg.cloud_account,
+                                                        request_msg.request_info),
+                                      loop = self._loop)
+            elif action == rwdts.QueryAction.DELETE:
+                response_info = None
+                yield from self._parent.release_virtual_compute(pathentry.key00.event_id)
+                self.delete_record_dts(self._vdu_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()))
+            elif action == rwdts.QueryAction.READ:
+                response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
+            else:
+                # NOTE(review): the message below omits 'read' even though
+                # READ is handled above (cf. the vlink handler's message).
+                raise ValueError("Only create/delete actions available. Received action: %s" %(action))
+
+            self._log.debug("Responding with VDUInfo at xpath %s: %s",
+                            response_xpath, response_info)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info)
+
+
+        @asyncio.coroutine
+        def on_request_ready(registration, status):
+            """Signal the matching readiness event for wait_ready()."""
+            self._log.debug("Got request ready event (registration: %s) (status: %s)",
+                            registration, status)
+
+            if registration == self._link_reg:
+                self._link_reg_event.set()
+            elif registration == self._vdu_reg:
+                self._vdu_reg_event.set()
+            else:
+                self._log.error("Unknown registration ready event: %s", registration)
+
+        # Group + registration for virtual-link requests.
+        link_handlers = rift.tasklets.Group.Handler(on_event=onlink_event,)
+        with self._dts.group_create(handler=link_handlers) as link_group:
+            self._log.debug("Registering for Link Resource Request using xpath: %s",
+                            ResourceMgrEvent.VLINK_REQUEST_XPATH)
+
+            self._link_reg = link_group.register(xpath=ResourceMgrEvent.VLINK_REQUEST_XPATH,
+                                            handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+                                                                                          on_commit=on_link_request_commit,
+                                                                                          on_prepare=on_link_request_prepare),
+                                            flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+
+        # Group + registration for VDU requests.
+        vdu_handlers = rift.tasklets.Group.Handler(on_event=onvdu_event, )
+        with self._dts.group_create(handler=vdu_handlers) as vdu_group:
+
+            self._log.debug("Registering for VDU Resource Request using xpath: %s",
+                            ResourceMgrEvent.VDU_REQUEST_XPATH)
+
+            self._vdu_reg = vdu_group.register(xpath=ResourceMgrEvent.VDU_REQUEST_XPATH,
+                                           handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+                                                                                         on_commit=on_vdu_request_commit,
+                                                                                         on_prepare=on_vdu_request_prepare),
+                                           flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py
new file mode 100755
index 0000000..cdcadc7
--- /dev/null
+++ b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py
@@ -0,0 +1,232 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import logging
+import sys
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwYang,
+    RwResourceMgrYang,
+    RwLaunchpadYang,
+    RwcalYang,
+)
+
+import rift.tasklets
+
+from . import rwresmgr_core as Core
+from . import rwresmgr_config as Config
+from . import rwresmgr_events as Event
+
+
+class ResourceManager(object):
+    def __init__(self, log, log_hdl, loop, dts):
+        self._log            = log
+        self._log_hdl        = log_hdl
+        self._loop           = loop
+        self._dts            = dts
+        self.config_handler  = Config.ResourceMgrConfig(self._dts, self._log, self._log_hdl, self._loop, self)
+        self.event_handler   = Event.ResourceMgrEvent(self._dts, self._log, self._loop, self)
+        self.core            = Core.ResourceMgrCore(self._dts, self._log, self._log_hdl, self._loop, self)
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self.config_handler.register()
+        yield from self.event_handler.register()
+
+    def add_cloud_account_config(self, account):
+        self._log.debug("Received Cloud-Account add config event for account: %s", account.name)
+        self.core.add_cloud_account(account)
+
+    def update_cloud_account_config(self, account):
+        self._log.debug("Received Cloud-Account update config event for account: %s", account.name)
+        self.core.update_cloud_account(account)
+
+    def delete_cloud_account_config(self, account_name, dry_run=False):
+        self._log.debug("Received Cloud-Account delete event for account (dry_run: %s): %s",
+                        dry_run, account_name)
+        self.core.delete_cloud_account(account_name, dry_run)
+
+    def get_cloud_account_names(self):
+        cloud_account_names = self.core.get_cloud_account_names()
+        return cloud_account_names
+
+    def pool_add(self, cloud_account_name, pool):
+        self._log.debug("Received Pool add event for cloud account %s pool: %s",
+                        cloud_account_name, pool.name)
+        self.core.add_resource_pool(cloud_account_name, pool)
+
+    def pool_modify(self, cloud_account_name, pool):
+        self._log.debug("Received Pool modify event for cloud account %s pool: %s",
+                        cloud_account_name, pool.name)
+        self.core.modify_resource_pool(cloud_account_name, pool)
+
+    def pool_delete(self, cloud_account_name, pool_name):
+        self._log.debug("Received Pool delete event for cloud account %s pool: %s",
+                        cloud_account_name, pool_name)
+        self.core.delete_resource_pool(cloud_account_name, pool_name)
+
+    def get_pool_list(self, cloud_account_name):
+        return self.core.get_resource_pool_list(cloud_account_name)
+
+    def get_pool_info(self, cloud_account_name, pool_name):
+        self._log.debug("Received get-pool-info event for cloud account %s pool: %s",
+                        cloud_account_name, pool_name)
+        return self.core.get_resource_pool_info(cloud_account_name, pool_name)
+
+    def lock_pool(self, cloud_account_name, pool_name):
+        self._log.debug("Received pool unlock event for pool: %s",
+                        cloud_account_name, pool_name)
+        self.core.lock_resource_pool(cloud_account_name, pool_name)
+
+    def unlock_pool(self, cloud_account_name, pool_name):
+        self._log.debug("Received pool unlock event for pool: %s",
+                        cloud_account_name, pool_name)
+        self.core.unlock_resource_pool(cloud_account_name, pool_name)
+
+    @asyncio.coroutine
+    def allocate_virtual_network(self, event_id, cloud_account_name, request):
+        self._log.info("Received network resource allocation request with event-id: %s", event_id)
+        resource = yield from self.core.allocate_virtual_resource(event_id, cloud_account_name, request, 'network')
+        return resource
+
+    @asyncio.coroutine
+    def reallocate_virtual_network(self, event_id, cloud_account_name, request, resource):
+        self._log.info("Received network resource allocation request with event-id: %s", event_id)
+        resource = yield from self.core.reallocate_virtual_resource(event_id, cloud_account_name, request, 'network', resource)
+        return resource
+
+    @asyncio.coroutine
+    def release_virtual_network(self, event_id):
+        self._log.info("Received network resource release request with event-id: %s", event_id)
+        yield from self.core.release_virtual_resource(event_id, 'network')
+
+    @asyncio.coroutine
+    def read_virtual_network_info(self, event_id):
+        self._log.info("Received network resource read request with event-id: %s", event_id)
+        info = yield from self.core.read_virtual_resource(event_id, 'network')
+        return info
+
+    @asyncio.coroutine
+    def allocate_virtual_compute(self, event_id, cloud_account_name, request):
+        self._log.info("Received compute resource allocation request "
+                       "(cloud account: %s) with event-id: %s",
+                       cloud_account_name, event_id)
+        resource = yield from self.core.allocate_virtual_resource(
+                event_id, cloud_account_name, request, 'compute',
+                )
+        return resource
+
+    @asyncio.coroutine
+    def reallocate_virtual_compute(self, event_id, cloud_account_name, request, resource):
+        self._log.info("Received compute resource allocation request "
+                       "(cloud account: %s) with event-id: %s",
+                       cloud_account_name, event_id)
+        resource = yield from self.core.reallocate_virtual_resource(
+                event_id, cloud_account_name, request, 'compute', resource, 
+                )
+        return resource
+
+    @asyncio.coroutine
+    def release_virtual_compute(self, event_id):
+        self._log.info("Received compute resource release request with event-id: %s", event_id)
+        yield from self.core.release_virtual_resource(event_id, 'compute')
+
+    @asyncio.coroutine
+    def read_virtual_compute_info(self, event_id):
+        self._log.info("Received compute resource read request with event-id: %s", event_id)
+        info = yield from self.core.read_virtual_resource(event_id, 'compute')
+        return info
+
+
+class ResMgrTasklet(rift.tasklets.Tasklet):
+    """Tasklet hosting the Resource Manager: creates the DTS api handle and
+    drives ResourceManager construction/registration from DTS state changes."""
+
+    def __init__(self, *args, **kwargs):
+        super(ResMgrTasklet, self).__init__(*args, **kwargs)
+        self.rwlog.set_category("rw-resource-mgr-log")
+        self._dts = None                 # created in start()
+        self._resource_manager = None    # created in init()
+
+    def start(self):
+        """Tasklet start hook: create the DTS api handle; on_dts_state_change
+        will drive the rest of the bring-up."""
+        super(ResMgrTasklet, self).start()
+        self.log.info("Starting ResMgrTasklet")
+
+        self.log.debug("Registering with dts")
+
+        self._dts = rift.tasklets.DTS(self.tasklet_info,
+                                      RwResourceMgrYang.get_schema(),
+                                      self.loop,
+                                      self.on_dts_state_change)
+
+        self.log.debug("Created DTS Api GI Object: %s", self._dts)
+
+    def stop(self):
+      # NOTE(review): the 2/3-space indentation below is inconsistent with
+      # the rest of the file; left byte-identical in this doc-only change.
+      try:
+         self._dts.deinit()
+      except Exception:
+         print("Caught Exception in RESMGR stop:", sys.exc_info()[0])
+         raise
+
+    def on_instance_started(self):
+        """Instance-started callback: logging only, no work to do."""
+        self.log.debug("Got instance started callback")
+
+    @asyncio.coroutine
+    def init(self):
+        """DTS INIT-state handler: build the ResourceManager and register
+        its config/event handlers with DTS."""
+        self._log.info("Initializing the Resource Manager tasklet")
+        self._resource_manager = ResourceManager(self.log,
+                                                 self.log_hdl,
+                                                 self.loop,
+                                                 self._dts)
+        yield from self._resource_manager.register()
+
+    @asyncio.coroutine
+    def run(self):
+        """DTS RUN-state handler: nothing to do, everything is event-driven."""
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Take action according to current dts state to transition
+        application into the corresponding application state
+
+        Arguments
+            state - current dts state
+        """
+        # dts state -> next dts state to request after our handler runs
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        # dts state -> application handler coroutine
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self._dts.handle.set_state(next_state)
diff --git a/rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py b/rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py
new file mode 100755
index 0000000..55d2329
--- /dev/null
+++ b/rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py
@@ -0,0 +1,27 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwresmgrtasklet
+class Tasklet(rift.tasklets.rwresmgrtasklet.ResMgrTasklet):
+    """Plugin shim (see the RIFT-6485 workaround note above): all behavior
+    lives in rift.tasklets.rwresmgrtasklet.ResMgrTasklet."""
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py b/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py
new file mode 100755
index 0000000..87d11a2
--- /dev/null
+++ b/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py
@@ -0,0 +1,781 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import logging
+import os
+import sys
+import types
+import unittest
+import uuid
+import random
+
+import xmlrunner
+
+import gi
+gi.require_version('CF', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwMain', '1.0')
+gi.require_version('RwManifestYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwCal', '1.0')
+
+
+import gi.repository.CF as cf
+import gi.repository.RwDts as rwdts
+import gi.repository.RwMain as rwmain
+import gi.repository.RwManifestYang as rwmanifest
+import gi.repository.RwResourceMgrYang as rmgryang
+from gi.repository import RwcalYang
+from gi.repository import RwCloudYang
+from gi.repository.RwTypes import RwStatus
+
+import rw_peas
+import rift.tasklets
+import rift.test.dts
+
+# asyncio.ensure_future only exists from Python 3.4.4 onward; alias the
+# older spelling on earlier interpreters.
+# NOTE(review): "async" became a reserved keyword in Python 3.7, where this
+# line is a SyntaxError at import time -- confirm the target interpreter is
+# < 3.7 or guard this differently.
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+# Credentials and fixture identifiers for a live OpenStack deployment used
+# by the openstack_* tests. These are environment-specific (lab host,
+# pre-created image/VMs/networks) and must be updated before running.
+openstack_info = {
+    'username'      : 'pluto',
+    'password'      : 'mypasswd',
+    'auth_url'      : 'http://10.66.4.14:5000/v3/',
+    'project_name'  : 'demo',
+    'mgmt_network'  : 'private',
+    'image_id'      : '5cece2b1-1a49-42c5-8029-833c56574652',
+    'vms'           : ['res-test-1', 'res-test-2'],
+    'networks'      : ['testnet1', 'testnet2']}
+
+
+# NOTE(review): function name has a typo ("temaplate") and an identical
+# duplicate definition appears later in this file which silently shadows
+# this one -- one of the two copies should be deleted.
+def create_mock_resource_temaplate():
+    ### Resources to be requested for the 'mock' cloud account type:
+    ### two VDU (compute) requests and two virtual-link (network) requests,
+    ### keyed by resource name.
+    resource_requests = {'compute': {}, 'network': {}}
+
+    ###### mycompute-0
+    msg = rmgryang.VDUEventData_RequestInfo()
+    # Deterministic fake image id (uuid3 is a stable hash of the name).
+    msg.image_id  = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-0'))
+    msg.vm_flavor.vcpu_count = 4
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 40
+    resource_requests['compute']['mycompute-0'] = msg
+
+    ###### mycompute-1
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-1'))
+    msg.vm_flavor.vcpu_count = 2
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 20
+    resource_requests['compute']['mycompute-1'] = msg
+
+    ####### mynet-0 (no extra request attributes for mock networks)
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-0'] = msg
+
+    ####### mynet-1
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-1'] = msg
+
+    return resource_requests
+
+
+def create_cloudsim_resource_template():
+    ### Resources to be requested for the 'cloudsim' cloud account type.
+    ### Same shape as the mock template, but image ids are the literal "1"
+    ### (presumably the first image registered in cloudsim -- TODO confirm).
+    resource_requests = {'compute': {}, 'network': {}}
+
+    ###### mycompute-0
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = "1"
+    msg.vm_flavor.vcpu_count = 4
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 40
+    resource_requests['compute']['mycompute-0'] = msg
+
+    ###### mycompute-1
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = "1"
+    msg.vm_flavor.vcpu_count = 2
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 20
+    resource_requests['compute']['mycompute-1'] = msg
+
+    ####### mynet-0 (no extra request attributes)
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-0'] = msg
+
+    ####### mynet-1
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-1'] = msg
+
+    return resource_requests
+
+# NOTE(review): this is an exact, byte-for-byte duplicate of the
+# create_mock_resource_temaplate() defined earlier in this file. Being the
+# later definition, this copy is the one actually used at runtime; the two
+# are identical so behavior is unaffected, but one copy should be removed.
+def create_mock_resource_temaplate():
+    ### Resources to be requested for the 'mock' cloud account type.
+    resource_requests = {'compute': {}, 'network': {}}
+
+    ###### mycompute-0
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-0'))
+    msg.vm_flavor.vcpu_count = 4
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 40
+    resource_requests['compute']['mycompute-0'] = msg
+
+    ###### mycompute-1
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-1'))
+    msg.vm_flavor.vcpu_count = 2
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 20
+    resource_requests['compute']['mycompute-1'] = msg
+
+    ####### mynet-0
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-0'] = msg
+
+    ####### mynet-1
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-1'] = msg
+
+    return resource_requests
+
+
+def create_openstack_static_template():
+    ### Resources to be requested for the 'openstack_static' account type.
+    ### Compute requests reference the pre-uploaded image from
+    ### openstack_info; networks request VLAN provider networks with fixed
+    ### segmentation ids (must exist/be allowed in the target OpenStack).
+    resource_requests = {'compute': {}, 'network': {}}
+
+    ###### mycompute-0
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = openstack_info['image_id']
+    msg.vm_flavor.vcpu_count = 4
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 80
+    resource_requests['compute']['mycompute-0'] = msg
+
+    ###### mycompute-1
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = openstack_info['image_id']
+    msg.vm_flavor.vcpu_count = 2
+    msg.vm_flavor.memory_mb = 4096
+    msg.vm_flavor.storage_gb = 40
+    resource_requests['compute']['mycompute-1'] = msg
+
+    ####### mynet-0: VLAN 17 on physical network PHYSNET1
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    msg.provider_network.physical_network = 'PHYSNET1'
+    msg.provider_network.overlay_type = 'VLAN'
+    msg.provider_network.segmentation_id = 17
+    resource_requests['network']['mynet-0'] = msg
+
+    ####### mynet-1: VLAN 18 on physical network PHYSNET1
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    msg.provider_network.physical_network = 'PHYSNET1'
+    msg.provider_network.overlay_type = 'VLAN'
+    msg.provider_network.segmentation_id = 18
+    resource_requests['network']['mynet-1'] = msg
+
+    return resource_requests
+
+
+def create_openstack_dynamic_template():
+    ### Resources to be requested for the 'openstack_dynamic' account type.
+    ### Compute requests add EPA attributes (hugepages, CPU pinning) and a
+    ### floating/public address; networks carry no provider attributes so
+    ### the resource manager chooses the overlay itself.
+    resource_requests = {'compute': {}, 'network': {}}
+
+    ###### mycompute-0
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = openstack_info['image_id']
+    msg.vm_flavor.vcpu_count = 2
+    msg.vm_flavor.memory_mb = 4096
+    msg.vm_flavor.storage_gb = 40
+    msg.guest_epa.mempage_size = 'LARGE'
+    msg.guest_epa.cpu_pinning_policy = 'DEDICATED'
+    msg.allocate_public_address = True
+
+    resource_requests['compute']['mycompute-0'] = msg
+
+    ###### mycompute-1
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = openstack_info['image_id']
+    msg.vm_flavor.vcpu_count = 4
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 40
+    msg.guest_epa.mempage_size = 'LARGE'
+    msg.guest_epa.cpu_pinning_policy = 'DEDICATED'
+    msg.allocate_public_address = True
+
+    resource_requests['compute']['mycompute-1'] = msg
+
+    ####### mynet-0 (provider attributes intentionally left unset)
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    #msg.provider_network.overlay_type = 'VXLAN'
+    #msg.provider_network.segmentation_id = 71
+
+    resource_requests['network']['mynet-0'] = msg
+
+    ####### mynet-1
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    #msg.provider_network.overlay_type = 'VXLAN'
+    #msg.provider_network.segmentation_id = 73
+    resource_requests['network']['mynet-1'] = msg
+
+    return resource_requests
+
+
+
+
+# Module-level table of request templates, keyed by cloud account type.
+# Built once at import time. (The 'mock' entry uses the later of the two
+# identical create_mock_resource_temaplate definitions above.)
+resource_requests = {
+    'mock' : create_mock_resource_temaplate(),
+    'openstack_static': create_openstack_static_template(),
+    'openstack_dynamic': create_openstack_dynamic_template(),
+    'cloudsim': create_cloudsim_resource_template(),
+}
+
+
+def get_cal_account(account_type):
+    """
+    Build a RwcalYang.CloudAccount message for the given account type.
+
+    Arguments:
+        account_type - one of 'mock', 'openstack_static',
+                       'openstack_dynamic' or 'cloudsim'
+
+    Returns a populated CloudAccount. NOTE(review): an unrecognized
+    account_type silently returns an empty account -- consider raising
+    ValueError instead.
+    """
+    account = RwcalYang.CloudAccount()
+    if account_type == 'mock':
+        account.name          = 'mock_account'
+        account.account_type  = "mock"
+        account.mock.username = "mock_user"
+    elif ((account_type == 'openstack_static') or (account_type == 'openstack_dynamic')):
+        # Both openstack variants share the same credentials; they differ
+        # only in the request templates used by the tests.
+        account.name = 'openstack_cal'
+        account.account_type = 'openstack'
+        account.openstack.key = openstack_info['username']
+        account.openstack.secret       = openstack_info['password']
+        account.openstack.auth_url     = openstack_info['auth_url']
+        account.openstack.tenant       = openstack_info['project_name']
+        account.openstack.mgmt_network = openstack_info['mgmt_network']
+
+    elif account_type == 'cloudsim':
+        account.name          = 'cloudsim'
+        account.account_type  = "cloudsim_proxy"
+
+    return account
+
+def create_cal_plugin(account, log_hdl):
+    plugin_name = getattr(account, account.account_type).plugin_name
+    plugin = rw_peas.PeasPlugin(plugin_name, 'RwCal-1.0')
+    engine, info, extension = plugin()
+    rwcal = plugin.get_interface("Cloud")
+    try:
+        rc = rwcal.init(log_hdl)
+        assert rc == RwStatus.SUCCESS
+    except Exception as e:
+        raise
+    return rwcal
+
+
+class RMMgrTestCase(rift.test.dts.AbstractDTSTest):
+    """DTS test case driving the resource-manager tasklet against mock,
+    cloudsim and OpenStack cloud accounts."""
+
+    rwcal = None            # cached CAL plugin interface (set by tests)
+    rwcal_acct_info = None  # cached account info (set by tests)
+
+    @classmethod
+    def configure_suite(cls, rwmain):
+        # Start the tasklets under test from their plugin directories.
+        # NOTE(review): the 'rwmain' parameter is unused -- the method uses
+        # cls.rwmain, presumably set by the AbstractDTSTest base; confirm
+        # and drop the parameter if the base class signature allows.
+        rm_dir = os.environ.get('RM_DIR')
+        cnt_mgr_dir = os.environ.get('CNTR_MGR_DIR')
+        cal_proxy_dir = os.environ.get('CAL_PROXY_DIR')
+
+        cls.rwmain.add_tasklet(cal_proxy_dir, 'rwcalproxytasklet')
+        cls.rwmain.add_tasklet(rm_dir, 'rwresmgrtasklet')
+        cls.rwmain.add_tasklet(cnt_mgr_dir, 'rwcntmgrtasklet')
+
+    @classmethod
+    def configure_schema(cls):
+        # Schema the DTS test harness registers for this suite.
+        return rmgryang.get_schema()
+
+    @asyncio.coroutine
+    def wait_tasklets(self):
+        # Crude startup barrier: give the tasklets one second to register.
+        yield from asyncio.sleep(1, loop=self.loop)
+
+    @classmethod
+    def configure_timeout(cls):
+        # Per-test timeout in seconds (generous: OpenStack runs are slow).
+        return 360
+
+    def get_cloud_account_msg(self, acct_type):
+        # Convert the CAL account into the rw-cloud CloudAccount message
+        # via dict round-trip; name is re-assigned explicitly afterwards.
+        cloud_account = RwCloudYang.CloudAccount()
+        acct = get_cal_account(acct_type)
+        cloud_account.from_dict(acct.as_dict())
+        cloud_account.name = acct.name
+        return cloud_account
+
+    def get_compute_pool_msg(self, name, pool_type, cloud_type):
+        # Build a ResourcePools config message containing one compute pool.
+        # "static" pools are seeded with VDUs discovered via the CAL plugin;
+        # "dynamic" pools just get a max size and allocate on demand.
+        pool_config = rmgryang.ResourcePools()
+        pool = pool_config.pools.add()
+        pool.name = name
+        pool.resource_type = "compute"
+        if pool_type == "static":
+            pool.pool_type = 'static'
+            acct = get_cal_account(cloud_type)
+            rwcal = create_cal_plugin(acct, self.tinfo.get_rwlog_ctx())
+            rc, rsp = rwcal.get_vdu_list(acct)
+            assert rc == RwStatus.SUCCESS
+
+            if cloud_type == 'openstack_static':
+                # Only adopt the pre-created lab VMs listed in openstack_info.
+                for vdu in rsp.vdu_info_list:
+                    if vdu.name in openstack_info['vms']:
+                        self.log.info("Adding the static compute resource: %s to compute pool", vdu.name)
+                        r = pool.resources.add()
+                        r.resource_id = vdu.vdu_id
+            else:
+                # 'mock', 'cloudsim' 'openstack_dynamic' etc: adopt every VDU.
+                for vdu in rsp.vdu_info_list:
+                    self.log.info("Adding the static compute resource: %s to compute pool", vdu.name)
+                    r = pool.resources.add()
+                    r.resource_id = vdu.vdu_id
+        else:
+            pool.pool_type = 'dynamic'
+            pool.max_size = 10
+        return pool_config
+
+    def get_network_pool_msg(self, name, pool_type, cloud_type):
+        # Build a ResourcePools config message containing one network pool.
+        # Mirrors get_compute_pool_msg but seeds static pools with virtual
+        # links instead of VDUs.
+        pool_config = rmgryang.ResourcePools()
+        pool = pool_config.pools.add()
+        pool.name = name
+        pool.resource_type = "network"
+        if pool_type == "static":
+            pool.pool_type = 'static'
+            acct = get_cal_account(cloud_type)
+            rwcal = create_cal_plugin(acct, self.tinfo.get_rwlog_ctx())
+            rc, rsp = rwcal.get_virtual_link_list(acct)
+            assert rc == RwStatus.SUCCESS
+            if cloud_type == 'openstack_static':
+                # Only adopt the pre-created lab networks from openstack_info.
+                for vlink in rsp.virtual_link_info_list:
+                    if vlink.name in openstack_info['networks']:
+                        self.log.info("Adding the static network resource: %s to network pool", vlink.name)
+                        r = pool.resources.add()
+                        r.resource_id = vlink.virtual_link_id
+            else:
+                # 'mock', 'cloudsim', 'openstack_dynamic' etc: adopt all links.
+                for vlink in rsp.virtual_link_info_list:
+                    self.log.info("Adding the static network resource: %s to network pool", vlink.name)
+                    r = pool.resources.add()
+                    r.resource_id = vlink.virtual_link_id
+        else:
+            pool.pool_type = 'dynamic'
+            pool.max_size = 4
+        return pool_config
+
+
+    def get_network_reserve_msg(self, name, cloud_type, xpath):
+        # Build a VirtualLinkEventData reservation message for resource
+        # 'name', copying any provider-network attributes present in the
+        # template for cloud_type. Returns (msg, xpath-with-event-id).
+        event_id = str(uuid.uuid4())
+        msg = rmgryang.VirtualLinkEventData()
+        msg.event_id = event_id
+        msg.request_info.name = name
+        attributes = ['physical_network', 'name', 'overlay_type', 'segmentation_id']
+
+        # NOTE(review): the has_field('provider_network') check is loop
+        # invariant and could be hoisted above the for loop.
+        for attr in attributes:
+            if resource_requests[cloud_type]['network'][name].has_field('provider_network'):
+                if resource_requests[cloud_type]['network'][name].provider_network.has_field(attr):
+                    setattr(msg.request_info.provider_network, attr,
+                            getattr(resource_requests[cloud_type]['network'][name].provider_network ,attr))
+
+        return msg, xpath.format(event_id)
+
+    def get_compute_reserve_msg(self, name, cloud_type, xpath, vlinks):
+        # Build a VDUEventData reservation message for compute resource
+        # 'name': copies flavor and EPA attributes from the template and
+        # adds one connection point per virtual link id in 'vlinks'.
+        # Returns (msg, xpath-with-event-id).
+        event_id = str(uuid.uuid4())
+        msg = rmgryang.VDUEventData()
+        msg.event_id = event_id
+        msg.request_info.name = name
+        msg.request_info.image_id = resource_requests[cloud_type]['compute'][name].image_id
+        attributes = ['image_id', 'vcpu_count', 'memory_mb', 'storage_gb']
+
+        if resource_requests[cloud_type]['compute'][name].has_field('vm_flavor'):
+            for attr in attributes:
+                if resource_requests[cloud_type]['compute'][name].vm_flavor.has_field(attr):
+                    setattr(msg.request_info.vm_flavor,
+                            attr,
+                            getattr(resource_requests[cloud_type]['compute'][name].vm_flavor , attr))
+
+        attributes = ['mempage_size', 'cpu_pinning_policy']
+
+        if resource_requests[cloud_type]['compute'][name].has_field('guest_epa'):
+            for attr in attributes:
+                if resource_requests[cloud_type]['compute'][name].guest_epa.has_field(attr):
+                    setattr(msg.request_info.guest_epa,
+                            attr,
+                            getattr(resource_requests[cloud_type]['compute'][name].guest_epa , attr))
+
+        if resource_requests[cloud_type]['compute'][name].has_field('allocate_public_address'):
+            msg.request_info.allocate_public_address = resource_requests[cloud_type]['compute'][name].allocate_public_address
+
+        # One connection point per requested virtual link, named
+        # "<vdu>-port-<n>" in order of appearance.
+        cnt = 0
+        for link in vlinks:
+            c1 = msg.request_info.connection_points.add()
+            c1.name = name+"-port-"+str(cnt)
+            cnt += 1
+            c1.virtual_link_id = link
+
+        self.log.info("Sending message :%s", msg)
+        return msg, xpath.format(event_id)
+
+    @asyncio.coroutine
+    def configure_cloud_account(self, dts, acct_type):
+        # Push a cloud-account configuration for acct_type through DTS.
+        account_xpath = "C,/rw-cloud:cloud/account"
+        msg = self.get_cloud_account_msg(acct_type)
+        self.log.info("Configuring cloud-account: %s",msg)
+        yield from dts.query_create(account_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    msg)
+
+    @asyncio.coroutine
+    def configure_compute_resource_pools(self, dts, resource_type, cloud_type):
+        # Configure a compute pool named "virtual-compute" of the given
+        # pool type ("static"/"dynamic") for cloud_type.
+        pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+        msg = self.get_compute_pool_msg("virtual-compute", resource_type, cloud_type)
+        self.log.info("Configuring compute-resource-pool: %s",msg)
+        yield from dts.query_create(pool_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    msg)
+
+
+    @asyncio.coroutine
+    def configure_network_resource_pools(self, dts, resource_type, cloud_type):
+        # Configure a network pool named "virtual-network" of the given
+        # pool type ("static"/"dynamic") for cloud_type.
+        pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+        msg = self.get_network_pool_msg("virtual-network", resource_type, cloud_type)
+        self.log.info("Configuring network-resource-pool: %s",msg)
+        yield from dts.query_create(pool_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    msg)
+
+    @asyncio.coroutine
+    def verify_resource_pools_config(self, dts):
+        # Read back the operational pool records and log their counters.
+        # NOTE(review): the real assertions are commented out below, so this
+        # currently only logs -- it verifies nothing.
+        pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records"
+        self.log.debug("Verifying test_create_resource_pools results")
+        res_iter = yield from dts.query_read(pool_records_xpath,)
+        for result in res_iter:
+            response = yield from result
+            records = response.result.records
+            #self.assertEqual(len(records), 2)
+            #names = [i.name for i in records]
+            #self.assertTrue('virtual-compute' in names)
+            #self.assertTrue('virtual-network' in names)
+            for record in records:
+                self.log.debug("Received Pool Record, Name: %s, Resource Type: %s, Pool Status: %s, Pool Size: %d, Allocated Resources: %d, Free Resources: %d",
+                               record.name,
+                               record.resource_type,
+                               record.pool_status,
+                               record.total_resources,
+                               record.allocated_resources,
+                               record.free_resources)
+
+    @asyncio.coroutine
+    def read_resource(self, dts, xpath):
+        # Read a single resource record at xpath; returns the last (and
+        # presumably only) result message's payload.
+        self.log.debug("Reading data for XPATH:%s", xpath)
+        result = yield from dts.query_read(xpath, rwdts.XactFlag.MERGE)
+        msg = None
+        for r in result:
+            msg = yield from r
+        self.log.debug("Received data: %s", msg.result)
+        return msg.result
+
+    @asyncio.coroutine
+    def reserve_network_resources(self, name, dts, cloud_type):
+        # Request allocation of network resource 'name'; returns the
+        # event-specific xpath used to read back / release the resource.
+        network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
+        msg,xpath = self.get_network_reserve_msg(name, cloud_type, network_xpath)
+        self.log.debug("Sending create event to network-event xpath %s with msg: %s" % (xpath, msg))
+        yield from dts.query_create(xpath, 0, msg)
+        return xpath
+
+
+    @asyncio.coroutine
+    def reserve_compute_resources(self, name, dts, cloud_type, vlinks = []):
+        # Request allocation of compute resource 'name', attached to the
+        # given virtual links; returns the event-specific xpath.
+        # NOTE(review): mutable default argument (vlinks=[]) -- safe here
+        # because it is never mutated, but prefer vlinks=None.
+        compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
+        msg,xpath = self.get_compute_reserve_msg(name, cloud_type, compute_xpath, vlinks)
+        self.log.debug("Sending create event to compute-event xpath %s with msg: %s" % (xpath, msg))
+        yield from dts.query_create(xpath, 0, msg)
+        return xpath
+
+    @asyncio.coroutine
+    def release_network_resources(self, dts, xpath):
+        # Release a previously reserved network resource by deleting its
+        # event record.
+        self.log.debug("Initiating network resource release for  : %s ", xpath)
+        yield from dts.query_delete(xpath, 0)
+
+    @asyncio.coroutine
+    def release_compute_resources(self, dts, xpath):
+        # Release a previously reserved compute resource by deleting its
+        # event record.
+        self.log.debug("Initiating compute resource release for  : %s ", xpath)
+        yield from dts.query_delete(xpath, 0)
+
+    @unittest.skip("Skipping test_static_pool_resource_allocation")
+    def test_static_pool_resource_allocation(self):
+        # End-to-end flow against the mock cloud with STATIC pools:
+        # configure account + pools, reserve one network and two computes,
+        # then release everything, checking pool records along the way.
+        self.log.debug("STARTING - test_static_pool_resource_allocation")
+        tinfo = self.new_tinfo('static_mock')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+        @asyncio.coroutine
+        def run_test():
+            networks = []   # list of (xpath, resource_info) for vlinks
+            computes = []   # list of (xpath, read result) for VDUs
+            cloud_type = 'mock'
+            yield from self.wait_tasklets()
+            yield from self.configure_cloud_account(dts, cloud_type)
+
+            yield from self.configure_network_resource_pools(dts, "static", cloud_type)
+            yield from self.configure_compute_resource_pools(dts, "static", cloud_type)
+            yield from self.verify_resource_pools_config(dts)
+
+            r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type)
+            r_info = yield from self.read_resource(dts,r_xpath)
+            networks.append((r_xpath, r_info.resource_info))
+
+            # Attach both computes to the single reserved virtual link.
+            for i in range(2):
+                r_xpath = yield from self.reserve_compute_resources("mycompute-"+str(i),
+                                                                    dts,
+                                                                    cloud_type,
+                                                                    [networks[0][1].virtual_link_id])
+                r_info = yield from self.read_resource(dts,r_xpath)
+                computes.append((r_xpath, r_info))
+
+            yield from self.verify_resource_pools_config(dts)
+
+            for r in computes:
+                yield from self.release_compute_resources(dts, r[0])
+
+            yield from self.release_network_resources(dts,networks[0][0])
+            yield from self.verify_resource_pools_config(dts)
+
+        # Drive the coroutine to completion and surface any failure.
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_static_pool_resource_allocation")
+
+    @unittest.skip("Skipping test_dynamic_pool_resource_allocation")
+    def test_dynamic_pool_resource_allocation(self):
+        # Same flow as the static test, but with DYNAMIC pools on the mock
+        # cloud: resources are created on demand rather than adopted.
+        self.log.debug("STARTING - test_dynamic_pool_resource_allocation")
+        tinfo = self.new_tinfo('dynamic_mock')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+        @asyncio.coroutine
+        def run_test():
+            networks = []
+            computes = []
+            cloud_type = 'mock'
+            yield from self.wait_tasklets()
+            yield from self.configure_cloud_account(dts, cloud_type)
+            yield from self.configure_network_resource_pools(dts, "dynamic", cloud_type)
+            yield from self.configure_compute_resource_pools(dts, "dynamic", cloud_type)
+            yield from self.verify_resource_pools_config(dts)
+
+            r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type)
+            r_info = yield from self.read_resource(dts,r_xpath)
+            networks.append((r_xpath, r_info.resource_info))
+
+            for i in range(2):
+                r_xpath = yield from self.reserve_compute_resources("mycompute-"+str(i),
+                                                                    dts,
+                                                                    cloud_type,
+                                                                    [networks[0][1].virtual_link_id])
+                r_info = yield from self.read_resource(dts,r_xpath)
+                computes.append((r_xpath, r_info))
+
+            yield from self.verify_resource_pools_config(dts)
+
+            for r in computes:
+                self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id)
+                yield from self.release_compute_resources(dts, r[0])
+
+            yield from self.release_network_resources(dts,networks[0][0])
+            yield from self.verify_resource_pools_config(dts)
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_dynamic_pool_resource_allocation")
+
+    @unittest.skip("Skipping test_dynamic_pool_resource_allocation")
+    def test_dynamic_cloudsim_pool_resource_allocation(self):
+        # Dynamic-pool flow against cloudsim. NOTE(review): the skip
+        # message, tinfo name and START/DONE log strings were copy/pasted
+        # from the mock dynamic test and do not match this test's name.
+        self.log.debug("STARTING - test_dynamic_pool_resource_allocation")
+        tinfo = self.new_tinfo('dynamic_mock')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+        @asyncio.coroutine
+        def run_test():
+            networks = []
+            computes = []
+            cloud_type = 'cloudsim'
+
+            # Long fixed delay, presumably to let the container manager and
+            # cloudsim come up -- TODO confirm and replace with a readiness
+            # check if possible.
+            yield from asyncio.sleep(120, loop=self.loop)
+            yield from self.configure_cloud_account(dts, cloud_type)
+            yield from self.configure_network_resource_pools(dts, "dynamic", cloud_type)
+            yield from self.configure_compute_resource_pools(dts, "dynamic", cloud_type)
+            yield from self.verify_resource_pools_config(dts)
+
+            r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type)
+            r_info = yield from self.read_resource(dts,r_xpath)
+            networks.append((r_xpath, r_info.resource_info))
+
+            for i in range(2):
+                r_xpath = yield from self.reserve_compute_resources("mycompute-"+str(i),
+                                                                    dts,
+                                                                    cloud_type,
+                                                                    [networks[0][1].virtual_link_id])
+                r_info = yield from self.read_resource(dts,r_xpath)
+                computes.append((r_xpath, r_info))
+
+            yield from self.verify_resource_pools_config(dts)
+
+            for r in computes:
+                self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id)
+                yield from self.release_compute_resources(dts, r[0])
+
+            yield from self.release_network_resources(dts,networks[0][0])
+            yield from self.verify_resource_pools_config(dts)
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_dynamic_pool_resource_allocation")
+
+    @unittest.skip("Skipping test_static_pool_openstack_resource_allocation")
+    def test_static_pool_openstack_resource_allocation(self):
+        # Static-pool flow against a live OpenStack (see openstack_info):
+        # pools are seeded from the pre-created lab VMs/networks.
+        self.log.debug("STARTING - test_static_pool_openstack_resource_allocation")
+        tinfo = self.new_tinfo('static_openstack')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+        @asyncio.coroutine
+        def run_test():
+            networks = []
+            computes = []
+            cloud_type = 'openstack_static'
+            yield from self.wait_tasklets()
+            yield from self.configure_cloud_account(dts, cloud_type)
+            yield from self.configure_network_resource_pools(dts, "static", cloud_type)
+            yield from self.configure_compute_resource_pools(dts, "static", cloud_type)
+            yield from self.verify_resource_pools_config(dts)
+
+            self.log.debug("Creating virtual-network-resources in openstack")
+            r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type)
+            r_info = yield from self.read_resource(dts,r_xpath)
+            networks.append((r_xpath, r_info.resource_info))
+            self.log.debug("virtual-network-resources successfully created in openstack")
+
+            self.log.debug("Creating virtual-network-compute in openstack")
+            for i in range(2):
+                r_xpath = yield from self.reserve_compute_resources("mycompute-" + str(i),
+                                                                    dts,
+                                                                    cloud_type,
+                                                                    [networks[0][1].virtual_link_id])
+                r_info = yield from self.read_resource(dts,r_xpath)
+                computes.append((r_xpath, r_info))
+
+            yield from self.verify_resource_pools_config(dts)
+            for r in computes:
+                self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id)
+                yield from self.release_compute_resources(dts, r[0])
+
+            yield from self.release_network_resources(dts,networks[0][0])
+            yield from self.verify_resource_pools_config(dts)
+            self.log.debug("Openstack static resource allocation completed")
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_static_pool_openstack_resource_allocation")
+
+    #@unittest.skip("Skipping test_dynamic_pool_openstack_resource_allocation")
+    def test_dynamic_pool_openstack_resource_allocation(self):
+        self.log.debug("STARTING - test_dynamic_pool_openstack_resource_allocation")
+        tinfo = self.new_tinfo('dynamic_openstack')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+        @asyncio.coroutine
+        def run_test():
+            networks = []
+            computes = []
+            cloud_type = 'openstack_dynamic'
+            yield from self.wait_tasklets()
+            yield from self.configure_cloud_account(dts, cloud_type)
+            yield from self.configure_network_resource_pools(dts, "dynamic", cloud_type)
+            yield from self.configure_compute_resource_pools(dts, "dynamic", cloud_type)
+            yield from self.verify_resource_pools_config(dts)
+
+            self.log.debug("Creating virtual-network-resources in openstack")
+            r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type)
+            r_info = yield from self.read_resource(dts,r_xpath)
+            networks.append((r_xpath, r_info.resource_info))
+            self.log.debug("virtual-network-resources successfully created in openstack")
+
+            self.log.debug("Creating virtual-network-compute in openstack")
+            for i in range(2):
+                r_xpath = yield from self.reserve_compute_resources("mycompute-" + str(i),
+                                                                    dts,
+                                                                    cloud_type,
+                                                                    [networks[0][1].virtual_link_id])
+                r_info = yield from self.read_resource(dts,r_xpath)
+                computes.append((r_xpath, r_info))
+
+            yield from self.verify_resource_pools_config(dts)
+            for r in computes:
+                self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id)
+                #yield from self.release_compute_resources(dts, r[0])
+
+            self.log.debug("Releasing network resource with id: %s", r[1].resource_info.vdu_id)
+            #yield from self.release_network_resources(dts,networks[0][0])
+            #yield from self.verify_resource_pools_config(dts)
+            self.log.debug("Openstack dynamic resource allocation completed")
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_dynamic_pool_openstack_resource_allocation")
+
+
+def main():
+    # Default the plugin-directory environment variables to the standard
+    # RIFT install layout (each may be overridden by the caller), then run
+    # the suite under xmlrunner so results land in RIFT_MODULE_TEST.
+    plugin_dir = os.path.join(os.environ["RIFT_INSTALL"], "usr/lib/rift/plugins")
+
+    if 'MESSAGE_BROKER_DIR' not in os.environ:
+        os.environ['MESSAGE_BROKER_DIR'] = os.path.join(plugin_dir, 'rwmsgbroker-c')
+
+    if 'ROUTER_DIR' not in os.environ:
+        os.environ['ROUTER_DIR'] = os.path.join(plugin_dir, 'rwdtsrouter-c')
+
+    if 'RM_DIR' not in os.environ:
+        os.environ['RM_DIR'] = os.path.join(plugin_dir, 'rwresmgrtasklet')
+
+    if 'CAL_PROXY_DIR' not in os.environ:
+        os.environ['CAL_PROXY_DIR'] = os.path.join(plugin_dir, 'rwcalproxytasklet')
+
+    if 'CNTR_MGR_DIR' not in os.environ:
+        os.environ['CNTR_MGR_DIR'] = os.path.join(plugin_dir, 'rwcntmgrtasklet')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    unittest.main(testRunner=runner)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/rwlaunchpad/plugins/rwvnfm/CMakeLists.txt b/rwlaunchpad/plugins/rwvnfm/CMakeLists.txt
new file mode 100644
index 0000000..7bc05a7
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvnfm/CMakeLists.txt
@@ -0,0 +1,39 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+include(rift_plugin)
+
+# Name of the tasklet; also the basename of the plugin entry file and of
+# the implementation package under rift/tasklets.
+set(TASKLET_NAME rwvnfmtasklet)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+# NOTE: PKG_LONG_NAME is expected to be inherited from the parent
+# rwlaunchpad CMakeLists.txt directory scope.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
+
diff --git a/rwlaunchpad/plugins/rwvnfm/Makefile b/rwlaunchpad/plugins/rwvnfm/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvnfm/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+# (the quotes are passed through verbatim to the shell's `find -name` argument)
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py b/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py
new file mode 100644
index 0000000..9728738
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py
@@ -0,0 +1 @@
+from .rwvnfmtasklet import VnfmTasklet
\ No newline at end of file
diff --git a/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py b/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py
new file mode 100755
index 0000000..17e6fbf
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py
@@ -0,0 +1,2751 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import collections
+import enum
+import logging
+import uuid
+import time
+import os.path
+import re
+import shutil
+import sys
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwVnfmYang', '1.0')
+gi.require_version('RwVlrYang', '1.0')
+gi.require_version('RwManifestYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwVnfrYang,
+    RwVnfmYang,
+    RwVlrYang,
+    VnfrYang,
+    RwManifestYang,
+    RwBaseYang,
+    RwResourceMgrYang,
+    ProtobufC,
+)
+
+import rift.tasklets
+import rift.package.store
+import rift.package.cloud_init
+
+
+class VMResourceError(Exception):
+    """ VM resource Error"""
+    pass
+
+
+class VnfRecordError(Exception):
+    """ VNF record instantiation failed"""
+    pass
+
+
+class VduRecordError(Exception):
+    """ VDU record instantiation failed"""
+    pass
+
+
+# NOTE(review): this class shadows the builtin ``NotImplemented`` singleton
+# (and is easily confused with the builtin NotImplementedError); consider
+# renaming.  Name kept as-is since other code may reference it.
+class NotImplemented(Exception):
+    """Not implemented """
+    pass
+
+
+class VnfrRecordExistsError(Exception):
+    """VNFR record already exist with the same VNFR id"""
+    pass
+
+
+class InternalVirtualLinkRecordError(Exception):
+    """Internal virtual link record error"""
+    pass
+
+
+class VDUImageNotFound(Exception):
+    """VDU Image not found error"""
+    pass
+
+
+class VirtualDeploymentUnitRecordError(Exception):
+    """VDU Instantiation failed"""
+    pass
+
+
+class VMNotReadyError(Exception):
+    """ VM Not yet received from resource manager """
+    pass
+
+
+class VDURecordNotFound(Exception):
+    """ Could not find a VDU record """
+    pass
+
+
+class VirtualNetworkFunctionRecordDescNotFound(Exception):
+    """ Cannot find Virtual Network Function Record Descriptor """
+    pass
+
+
+class VirtualNetworkFunctionDescriptorError(Exception):
+    """ Virtual Network Function Record Descriptor Error """
+    pass
+
+
+class VirtualNetworkFunctionDescriptorNotFound(Exception):
+    """ Virtual Network Function Record Descriptor Not Found """
+    pass
+
+
+class VirtualNetworkFunctionRecordNotFound(Exception):
+    """ Virtual Network Function Record Not Found """
+    pass
+
+
+class VirtualNetworkFunctionDescriptorRefCountExists(Exception):
+    """ Virtual Network Function Descriptor reference count exists """
+    pass
+
+
+class VnfrInstantiationFailed(Exception):
+    """ Virtual Network Function Instantiation failed"""
+    pass
+
+
+class VNFMPlacementGroupError(Exception):
+    """ Placement group constraint could not be satisfied """
+    pass
+
+class VirtualNetworkFunctionRecordState(enum.Enum):
+    """ VNFR state """
+    INIT = 1
+    VL_INIT_PHASE = 2
+    VM_INIT_PHASE = 3
+    READY = 4
+    TERMINATE = 5
+    VL_TERMINATE_PHASE = 6
+    VDU_TERMINATE_PHASE = 7
+    TERMINATED = 7
+    FAILED = 10
+
+
+class VDURecordState(enum.Enum):
+    """VDU record state """
+    # Lifecycle states for a single VDU; all values are distinct, so no
+    # enum aliasing occurs.
+    INIT = 1
+    INSTANTIATING = 2
+    RESOURCE_ALLOC_PENDING = 3
+    READY = 4
+    TERMINATING = 5
+    TERMINATED = 6
+    FAILED = 10
+
+
+class VcsComponent(object):
+    """ VCS Component within the VNF descriptor """
+    def __init__(self, dts, log, loop, cluster_name, vcs_handler, component, mangled_name):
+        # dts/log/loop: tasklet infrastructure handles.
+        # component: the VCS component message from the descriptor.
+        # mangled_name: pre-computed <vnf>:<component>:<vnfd-id> name.
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._component = component
+        self._cluster_name = cluster_name
+        self._vcs_handler = vcs_handler
+        self._mangled_name = mangled_name
+
+    @staticmethod
+    def mangle_name(component_name, vnf_name, vnfd_id):
+        """ Return the component name mangled as <vnf>:<component>:<vnfd-id>. """
+        return vnf_name + ":" + component_name + ":" + vnfd_id
+
+    @property
+    def name(self):
+        """ name of this component"""
+        return self._mangled_name
+
+    @property
+    def path(self):
+        """ The operational-inventory manifest path for this component """
+        return("D,/rw-manifest:manifest" +
+               "/rw-manifest:operational-inventory" +
+               "/rw-manifest:component" +
+               "[rw-manifest:component-name = '{}']").format(self.name)
+
+    @property
+    def instance_xpath(self):
+        """ The VCS instance path for this component's cluster """
+        return("D,/rw-base:vcs" +
+               "/instances" +
+               "/instance" +
+               "[instance-name = '{}']".format(self._cluster_name))
+
+    @property
+    def start_comp_xpath(self):
+        """ start component xpath """
+        return (self.instance_xpath +
+                "/child-n[instance-name = 'START-REQ']")
+
+    def get_start_comp_msg(self, ip_address):
+        """ Build the START admin-command message for this component. """
+        start_msg = RwBaseYang.VcsInstance_Instance_ChildN()
+        start_msg.instance_name = 'START-REQ'
+        start_msg.component_name = self.name
+        start_msg.admin_command = "START"
+        start_msg.ip_address = ip_address
+
+        return start_msg
+
+    @property
+    def msg(self):
+        """ Returns the message for this vcs component"""
+
+        vcs_comp_dict = self._component.as_dict()
+
+        def mangle_comp_names(comp_dict):
+            """ mangle component name  with VNF name, id"""
+            # Recursively walk dicts and lists so nested component_name
+            # fields are mangled as well.
+            for key, val in comp_dict.items():
+                if isinstance(val, dict):
+                    comp_dict[key] = mangle_comp_names(val)
+                elif isinstance(val, list):
+                    i = 0
+                    for ent in val:
+                        if isinstance(ent, dict):
+                            val[i] = mangle_comp_names(ent)
+                        else:
+                            val[i] = ent
+                        i += 1
+                elif key == "component_name":
+                    # NOTE(review): self._vnfd_name / self._vnfd_id are not
+                    # assigned anywhere in this class's __init__ — this
+                    # branch looks like it would raise AttributeError at
+                    # runtime; confirm where these attributes are set.
+                    comp_dict[key] = VcsComponent.mangle_name(val,
+                                                              self._vnfd_name,
+                                                              self._vnfd_id)
+            return comp_dict
+
+        mangled_dict = mangle_comp_names(vcs_comp_dict)
+        msg = RwManifestYang.OpInventory_Component.from_dict(mangled_dict)
+        return msg
+
+    @asyncio.coroutine
+    def publish(self, xact):
+        """ Publishes the VCS component """
+        self._log.debug("Publishing the VcsComponent %s, path = %s comp = %s",
+                        self.name, self.path, self.msg)
+        yield from self._vcs_handler.publish(xact, self.path, self.msg)
+
+    @asyncio.coroutine
+    def start(self, xact, parent, ip_addr=None):
+        """ Starts this VCS component """
+        # ATTN RV - replace with block add
+        start_msg = self.get_start_comp_msg(ip_addr)
+        self._log.debug("starting component %s %s",
+                        self.start_comp_xpath, start_msg)
+        yield from self._dts.query_create(self.start_comp_xpath,
+                                          0,
+                                          start_msg)
+        self._log.debug("started component %s, %s",
+                        self.start_comp_xpath, start_msg)
+
+
+class VirtualDeploymentUnitRecord(object):
+    """  Virtual Deployment Unit Record """
+    def __init__(self,
+                 dts,
+                 log,
+                 loop,
+                 vdud,
+                 vnfr,
+                 mgmt_intf,
+                 cloud_account_name,
+                 vnfd_package_store,
+                 vdur_id=None,
+                 placement_groups=[]):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vdud = vdud
+        self._vnfr = vnfr
+        self._mgmt_intf = mgmt_intf
+        self._cloud_account_name = cloud_account_name
+        self._vnfd_package_store = vnfd_package_store
+
+        self._vdur_id = vdur_id or str(uuid.uuid4())
+        self._int_intf = []
+        self._ext_intf = []
+        self._state = VDURecordState.INIT
+        self._state_failed_reason = None
+        self._request_id = str(uuid.uuid4())
+        self._name = vnfr.name + "__" + vdud.id
+        self._placement_groups = placement_groups
+        self._rm_regh = None
+        self._vm_resp = None
+        self._vdud_cloud_init = None
+        self._vdur_console_handler = VnfrConsoleOperdataDtsHandler(dts, log, loop, self._vnfr._vnfm, self._vnfr.vnfr_id, self._vdur_id,self.vdu_id)
+
+    @asyncio.coroutine
+    def vdu_opdata_register(self):
+        """ Register the per-VDU console operational-data DTS handler. """
+        yield from self._vdur_console_handler.register()
+
+    def cp_ip_addr(self, cp_name):
+        """ Find ip address by connection point name """
+        if self._vm_resp is not None:
+            for conn_point in self._vm_resp.connection_points:
+                if conn_point.name == cp_name:
+                    return conn_point.ip_address
+        return "0.0.0.0"
+
+    def cp_id(self, cp_name):
+        """ Find connection point id  by connection point name """
+        if self._vm_resp is not None:
+            for conn_point in self._vm_resp.connection_points:
+                if conn_point.name == cp_name:
+                    return conn_point.connection_point_id
+        return ''
+
+    @property
+    def vdu_id(self):
+        """ Id of the VDU descriptor this record was built from """
+        return self._vdud.id
+
+    @property
+    def vm_resp(self):
+        """ Last VM resource response from resource-mgr (None until received) """
+        return self._vm_resp
+
+    @property
+    def name(self):
+        """ Return this VDUR's name """
+        return self._name
+
+    @property
+    def cloud_account_name(self):
+        """ Cloud account this VDU should be created in """
+        return self._cloud_account_name
+
+    @property
+    def image_name(self):
+        """ name that should be used to lookup the image on the CMP """
+        return os.path.basename(self._vdud.image)
+
+    @property
+    def image_checksum(self):
+        """ checksum for the image, or None when the descriptor has none """
+        return self._vdud.image_checksum if self._vdud.has_field("image_checksum") else None
+
+    @property
+    def management_ip(self):
+        """ Public IP when allocated, else the VM management IP; None until active """
+        if not self.active:
+            return None
+        return self._vm_resp.public_ip if self._vm_resp.has_field('public_ip') else self._vm_resp.management_ip
+
+    @property
+    def vm_management_ip(self):
+        """ VIM-side management IP of the VM; None until active """
+        if not self.active:
+            return None
+        return self._vm_resp.management_ip
+
+    @property
+    def operational_status(self):
+        """ Operational status of this VDU"""
+        # Map internal VDURecordState names onto the yang-modeled
+        # operational-status strings.
+        op_stats_dict = {"INIT": "init",
+                         "INSTANTIATING": "vm_init_phase",
+                         "RESOURCE_ALLOC_PENDING": "vm_alloc_pending",
+                         "READY": "running",
+                         "FAILED": "failed",
+                         "TERMINATING": "terminated",
+                         "TERMINATED": "terminated",
+                         }
+        return op_stats_dict[self._state.name]
+
+    @property
+    def msg(self):
+        """ Build the Vdur protobuf message published for this VDU.
+
+        Combines fields copied from the VDU descriptor, the allocated VM
+        response (when present), and the resolved internal/external
+        interface lists.
+        """
+        # Descriptor fields copied verbatim into the record.
+        vdu_fields = ["vm_flavor",
+                      "guest_epa",
+                      "vswitch_epa",
+                      "hypervisor_epa",
+                      "host_epa",
+                      "name"]
+        vdu_copy_dict = {k: v for k, v in
+                         self._vdud.as_dict().items() if k in vdu_fields}
+        vdur_dict = {"id": self._vdur_id,
+                     "vdu_id_ref": self._vdud.id,
+                     "operational_status": self.operational_status,
+                     "operational_status_details": self._state_failed_reason,
+                     }
+        if self.vm_resp is not None:
+            vdur_dict.update({"vim_id": self.vm_resp.vdu_id,
+                              "flavor_id": self.vm_resp.flavor_id,
+                              "image_id": self.vm_resp.image_id,
+                              })
+
+        if self.management_ip is not None:
+            vdur_dict["management_ip"] = self.management_ip
+
+        if self.vm_management_ip is not None:
+            vdur_dict["vm_management_ip"] = self.vm_management_ip
+
+        vdur_dict.update(vdu_copy_dict)
+
+        icp_list = []
+        ii_list = []
+
+        # Internal connection points / interfaces come from the
+        # (interface, cp_id, vlr) tuples built in create_resource().
+        for intf, cp_id, vlr in self._int_intf:
+            cp = self.find_internal_cp_by_cp_id(cp_id)
+
+            icp_list.append({"name": cp.name,
+                             "id": cp.id,
+                             "type_yang": "VPORT",
+                             "ip_address": self.cp_ip_addr(cp.id)})
+
+            ii_list.append({"name": intf.name,
+                            "vdur_internal_connection_point_ref": cp.id,
+                            "virtual_interface": {}})
+
+        vdur_dict["internal_connection_point"] = icp_list
+        self._log.debug("internal_connection_point:%s", vdur_dict["internal_connection_point"])
+        vdur_dict["internal_interface"] = ii_list
+
+        ei_list = []
+        # External interfaces: here cp is the connection point *name*
+        # (see the etuple built in create_resource).
+        for intf, cp, vlr in self._ext_intf:
+            ei_list.append({"name": cp,
+                            "vnfd_connection_point_ref": cp,
+                            "virtual_interface": {}})
+            self._vnfr.update_cp(cp, self.cp_ip_addr(cp), self.cp_id(cp))
+
+        vdur_dict["external_interface"] = ei_list
+
+        placement_groups = []
+        for group in self._placement_groups:
+            placement_groups.append(group.as_dict())
+
+        vdur_dict['placement_groups_info'] = placement_groups
+        return RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur.from_dict(vdur_dict)
+
+    @property
+    def resmgr_path(self):
+        """ path for resource-mgr, keyed by this record's request id """
+        return ("D,/rw-resource-mgr:resource-mgmt" +
+                "/vdu-event" +
+                "/vdu-event-data[event-id='{}']".format(self._request_id))
+
+    @property
+    def vm_flavor_msg(self):
+        """ A copy of the VDU descriptor's vm_flavor message """
+        flavor = self._vdud.vm_flavor.__class__()
+        flavor.copy_from(self._vdud.vm_flavor)
+
+        return flavor
+
+    @property
+    def vdud_cloud_init(self):
+        """ Return the cloud-init contents for the VDU (computed once,
+        then cached) """
+        if self._vdud_cloud_init is None:
+            self._vdud_cloud_init = self.cloud_init()
+
+        return self._vdud_cloud_init
+
+    def cloud_init(self):
+        """ Populate cloud_init with cloud-config script from
+            either the inline contents or from the file provided
+
+        Returns the script text, or None when the descriptor supplies
+        neither cloud_init nor cloud_init_file.
+
+        Raises VirtualDeploymentUnitRecordError when the referenced
+        cloud-init file cannot be extracted from the stored package.
+        """
+        if self._vdud.cloud_init is not None:
+            self._log.debug("cloud_init script provided inline %s", self._vdud.cloud_init)
+            return self._vdud.cloud_init
+        elif self._vdud.cloud_init_file is not None:
+            # Get cloud-init script contents from the file provided in the cloud_init_file param
+            self._log.debug("cloud_init script provided in file %s", self._vdud.cloud_init_file)
+            filename = self._vdud.cloud_init_file
+            self._vnfd_package_store.refresh()
+            stored_package = self._vnfd_package_store.get_package(self._vnfr.vnfd_id)
+            cloud_init_extractor = rift.package.cloud_init.PackageCloudInitExtractor(self._log)
+            try:
+                return cloud_init_extractor.read_script(stored_package, filename)
+            except rift.package.cloud_init.CloudInitExtractionError as e:
+                raise VirtualDeploymentUnitRecordError(e)
+        else:
+            # Implicitly returns None in this case.
+            self._log.debug("VDU Instantiation: cloud-init script not provided")
+
+    def process_openstack_placement_group_construct(self, vm_create_msg_dict):
+        host_aggregates = []
+        availability_zones = []
+        server_groups = []
+        for group in self._placement_groups:
+            if group.has_field('host_aggregate'):
+                for aggregate in group.host_aggregate:
+                    host_aggregates.append(aggregate.as_dict())
+            if group.has_field('availability_zone'):
+                availability_zones.append(group.availability_zone.as_dict())
+            if group.has_field('server_group'):
+                server_groups.append(group.server_group.as_dict())
+
+        if availability_zones:
+            if len(availability_zones) > 1:
+                self._log.error("Can not launch VDU: %s in multiple availability zones. Requested Zones: %s", self.name, availability_zones)
+                raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple availability zones. Requsted Zones".format(self.name, availability_zones))
+            else:
+                vm_create_msg_dict['availability_zone'] = availability_zones[0]
+
+        if server_groups:
+            if len(server_groups) > 1:
+                self._log.error("Can not launch VDU: %s in multiple Server Group. Requested Groups: %s", self.name, server_groups)
+                raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple Server Groups. Requsted Groups".format(self.name, server_groups))
+            else:
+                vm_create_msg_dict['server_group'] = server_groups[0]
+
+        if host_aggregates:
+            vm_create_msg_dict['host_aggregate'] = host_aggregates
+
+        return
+
+    def process_placement_groups(self, vm_create_msg_dict):
+        """Process the placement_groups and fill resource-mgr request"""
+        if not self._placement_groups:
+            return
+
+        # All placement groups for one VDU are expected to target the same
+        # cloud type; a mixed set would make the construct ambiguous.
+        cloud_set = set([group.cloud_type for group in self._placement_groups])
+        assert len(cloud_set) == 1
+        cloud_type = cloud_set.pop()
+
+        if cloud_type == 'openstack':
+            self.process_openstack_placement_group_construct(vm_create_msg_dict)
+
+        else:
+            # Only openstack constructs are handled; others are logged and
+            # ignored.
+            self._log.info("Ignoring placement group with cloud construct for cloud-type: %s", cloud_type)
+        return
+
+    def resmgr_msg(self, config=None):
+        """ Build the VDUEventData request sent to resource-mgr to
+        allocate the VM for this VDU.
+
+        Arguments:
+            config - optional cloud-init userdata attached as vdu_init
+        """
+        vdu_fields = ["vm_flavor",
+                      "guest_epa",
+                      "vswitch_epa",
+                      "hypervisor_epa",
+                      "host_epa"]
+
+        self._log.debug("Creating params based on VDUD: %s", self._vdud)
+        vdu_copy_dict = {k: v for k, v in self._vdud.as_dict().items() if k in vdu_fields}
+
+        vm_create_msg_dict = {
+                "name": self.name,
+                "image_name": self.image_name,
+                }
+
+        if self.image_checksum is not None:
+            vm_create_msg_dict["image_checksum"] = self.image_checksum
+
+        # A management-interface VDU gets a public address allocated.
+        vm_create_msg_dict["allocate_public_address"] = self._mgmt_intf
+        if self._vdud.has_field('mgmt_vpci'):
+            vm_create_msg_dict["mgmt_vpci"] = self._vdud.mgmt_vpci
+
+        self._log.debug("VDUD: %s", self._vdud)
+        if config is not None:
+            vm_create_msg_dict['vdu_init'] = {'userdata': config}
+
+        cp_list = []
+        # External connection points (cp is the CP name here).
+        for intf, cp, vlr in self._ext_intf:
+            cp_info = {"name": cp,
+                       "virtual_link_id": vlr.network_id,
+                       "type_yang": intf.virtual_interface.type_yang}
+
+            if (intf.virtual_interface.has_field('vpci') and
+                    intf.virtual_interface.vpci is not None):
+                cp_info["vpci"] =  intf.virtual_interface.vpci
+
+            if (vlr.has_field('ip_profile_params')) and (vlr.ip_profile_params.has_field('security_group')):
+                cp_info['security_group'] = vlr.ip_profile_params.security_group
+
+            cp_list.append(cp_info)
+
+        # Internal connection points.
+        for intf, cp, vlr in self._int_intf:
+            if (intf.virtual_interface.has_field('vpci') and
+                    intf.virtual_interface.vpci is not None):
+                cp_list.append({"name": cp,
+                                "virtual_link_id": vlr.network_id,
+                                "type_yang": intf.virtual_interface.type_yang,
+                                "vpci": intf.virtual_interface.vpci})
+            else:
+                cp_list.append({"name": cp,
+                                "virtual_link_id": vlr.network_id,
+                                "type_yang": intf.virtual_interface.type_yang})
+
+        vm_create_msg_dict["connection_points"] = cp_list
+        vm_create_msg_dict.update(vdu_copy_dict)
+
+        self.process_placement_groups(vm_create_msg_dict)
+
+        msg = RwResourceMgrYang.VDUEventData()
+        msg.event_id = self._request_id
+        msg.cloud_account = self.cloud_account_name
+        msg.request_info.from_dict(vm_create_msg_dict)
+        return msg
+
+    @asyncio.coroutine
+    def terminate(self, xact):
+        """ Delete resource in VIM
+
+        Only acts when the VDU is READY or FAILED; other states are
+        ignored with a warning.  Also deregisters the resource-mgr and
+        console-operdata handles.
+        """
+        if self._state != VDURecordState.READY and self._state != VDURecordState.FAILED:
+            self._log.warning("VDU terminate in not ready state - Ignoring request")
+            return
+
+        self._state = VDURecordState.TERMINATING
+        if self._vm_resp is not None:
+            try:
+                # NOTE(review): the transaction context manager is entered
+                # synchronously (no yield from) — confirm this matches the
+                # dts API contract.
+                with self._dts.transaction() as new_xact:
+                    yield from self.delete_resource(new_xact)
+            except Exception:
+                # Best-effort: deletion failure is logged, teardown continues.
+                self._log.exception("Caught exception while deleting VDU %s", self.vdu_id)
+
+        if self._rm_regh is not None:
+            self._log.debug("Deregistering resource manager registration handle")
+            self._rm_regh.deregister()
+            self._rm_regh = None
+
+        if self._vdur_console_handler is not None:
+            # NOTE(review): logged at error level for a routine teardown
+            # step; presumably should be debug.
+            self._log.error("Deregistering vnfr vdur registration handle")
+            self._vdur_console_handler._regh.deregister()
+            self._vdur_console_handler._regh = None
+
+        self._state = VDURecordState.TERMINATED
+
+    def find_internal_cp_by_cp_id(self, cp_id):
+        """ Find the CP corresponding to the connection point id"""
+        cp = None
+
+        self._log.debug("find_internal_cp_by_cp_id(%s) called",
+                        cp_id)
+
+        for int_cp in self._vdud.internal_connection_point:
+            self._log.debug("Checking for int cp %s in internal connection points",
+                            int_cp.id)
+            if int_cp.id == cp_id:
+                cp = int_cp
+                break
+
+        if cp is None:
+            self._log.debug("Failed to find cp %s in internal connection points",
+                            cp_id)
+            msg = "Failed to find cp %s in internal connection points" % cp_id
+            raise VduRecordError(msg)
+
+        # return the VLR associated with the connection point
+        return cp
+
+    @asyncio.coroutine
+    def create_resource(self, xact, vnfr, config=None):
+        """ Request resource from ResourceMgr
+
+        Resolves the descriptor's external/internal interfaces into
+        (interface, cp, vlr) tuples, then issues the VM create request
+        and returns the resource_info from the response.
+
+        Arguments:
+            xact - transaction used to create the query block
+            vnfr - owning VNF record (source of connection points / VLRs)
+            config - optional cloud-init userdata for the request
+
+        Raises VduRecordError when an internal CP cannot be resolved and
+        VMResourceError when no usable response is returned.
+        """
+        def find_cp_by_name(cp_name):
+            """ Find a connection point by name """
+            cp = None
+            self._log.debug("find_cp_by_name(%s) called", cp_name)
+            for ext_cp in vnfr._cprs:
+                self._log.debug("Checking ext cp (%s) called", ext_cp.name)
+                if ext_cp.name == cp_name:
+                    cp = ext_cp
+                    break
+            if cp is None:
+                self._log.debug("Failed to find cp %s in external connection points",
+                                cp_name)
+            return cp
+
+        def find_internal_vlr_by_cp_name(cp_name):
+            """ Find the VLR corresponding to the connection point name"""
+            cp = None
+
+            self._log.debug("find_internal_vlr_by_cp_name(%s) called",
+                            cp_name)
+
+            for int_cp in self._vdud.internal_connection_point:
+                self._log.debug("Checking for int cp %s in internal connection points",
+                                int_cp.id)
+                if int_cp.id == cp_name:
+                    cp = int_cp
+                    break
+
+            if cp is None:
+                self._log.debug("Failed to find cp %s in internal connection points",
+                                cp_name)
+                msg = "Failed to find cp %s in internal connection points" % cp_name
+                raise VduRecordError(msg)
+
+            # return the VLR associated with the connection point
+            return vnfr.find_vlr_by_cp(cp_name)
+
+        block = xact.block_create()
+
+        self._log.debug("Executing vm request id: %s, action: create",
+                        self._request_id)
+
+        # Resolve the networks associated external interfaces
+        for ext_intf in self._vdud.external_interface:
+            self._log.debug("Resolving external interface name [%s], cp[%s]",
+                            ext_intf.name, ext_intf.vnfd_connection_point_ref)
+            cp = find_cp_by_name(ext_intf.vnfd_connection_point_ref)
+            if cp is None:
+                # NOTE(review): unresolved external CPs are skipped rather
+                # than treated as an error.
+                self._log.debug("Failed to find connection point - %s",
+                                ext_intf.vnfd_connection_point_ref)
+                continue
+            self._log.debug("Connection point name [%s], type[%s]",
+                            cp.name, cp.type_yang)
+
+            vlr = vnfr.ext_vlr_by_id(cp.vlr_ref)
+
+            etuple = (ext_intf, cp.name, vlr)
+            self._ext_intf.append(etuple)
+
+            self._log.debug("Created external interface tuple  : %s", etuple)
+
+        # Resolve the networks associated internal interfaces
+        for intf in self._vdud.internal_interface:
+            cp_id = intf.vdu_internal_connection_point_ref
+            self._log.debug("Resolving internal interface name [%s], cp[%s]",
+                            intf.name, cp_id)
+
+            try:
+                vlr = find_internal_vlr_by_cp_name(cp_id)
+            except Exception as e:
+                self._log.debug("Failed to find cp %s in internal VLR list", cp_id)
+                msg = "Failed to find cp %s in internal VLR list, e = %s" % (cp_id, e)
+                raise VduRecordError(msg)
+
+            ituple = (intf, cp_id, vlr)
+            self._int_intf.append(ituple)
+
+            self._log.debug("Created internal interface tuple  : %s", ituple)
+
+        resmgr_path = self.resmgr_path
+        resmgr_msg = self.resmgr_msg(config)
+
+        self._log.debug("Creating new VM request at: %s, params: %s", resmgr_path, resmgr_msg)
+        block.add_query_create(resmgr_path, resmgr_msg)
+
+        res_iter = yield from block.execute(now=True)
+
+        resp = None
+
+        # The last result in the iterator is taken as the response.
+        for i in res_iter:
+            r = yield from i
+            resp = r.result
+
+        if resp is None or not (resp.has_field('resource_info') and resp.resource_info.has_field('resource_state')):
+            raise VMResourceError("Did not get a vm resource response (resp: %s)", resp)
+        self._log.debug("Got vm request response: %s", resp.resource_info)
+        return resp.resource_info
+
+    @asyncio.coroutine
+    def delete_resource(self, xact):
+        """ Issue the resource-mgr delete request for this VDU's VM. """
+        block = xact.block_create()
+
+        self._log.debug("Executing vm request id: %s, action: delete",
+                        self._request_id)
+
+        block.add_query_delete(self.resmgr_path)
+
+        yield from block.execute(flags=0, now=True)
+
+    @asyncio.coroutine
+    def read_resource(self, xact):
+        block = xact.block_create()
+
+        self._log.debug("Executing vm request id: %s, action: delete",
+                        self._request_id)
+
+        block.add_query_read(self.resmgr_path)
+
+        res_iter = yield from block.execute(flags=0, now=True)
+        for i in res_iter:
+            r = yield from i
+            resp = r.result
+
+        if resp is None or not (resp.has_field('resource_info') and resp.resource_info.has_field('resource_state')):
+            raise VMResourceError("Did not get a vm resource response (resp: %s)", resp)
+        self._log.debug("Got vm request response: %s", resp.resource_info)
+        #self._vm_resp = resp.resource_info
+        return resp.resource_info
+
+
+    @asyncio.coroutine
+    def start_component(self):
+        """ Start the VCS component referenced by the VDU descriptor,
+        using the VM's management IP. """
+        self._log.debug("Starting component %s for  vdud %s vdur %s",
+                        self._vdud.vcs_component_ref,
+                        self._vdud,
+                        self._vdur_id)
+        yield from self._vnfr.start_component(self._vdud.vcs_component_ref,
+                                              self.vm_resp.management_ip)
+
+    @property
+    def active(self):
+        """ Is this VDU active """
+        return True if self._state is VDURecordState.READY else False
+
+    @asyncio.coroutine
+    def instantiation_failed(self, failed_reason=None):
+        """ Mark this VDU as FAILED and propagate the failure to the VNFR.
+
+        Arguments:
+            failed_reason - optional human-readable failure description
+        """
+        self._log.debug("VDU %s instantiation failed ", self._vdur_id)
+        self._state = VDURecordState.FAILED
+        self._state_failed_reason = failed_reason
+        yield from self._vnfr.instantiation_failed(failed_reason)
+
+    @asyncio.coroutine
+    def vdu_is_active(self):
+        """ Transition this VDU to READY once its resource becomes active.
+
+        Starts the associated VCS component (if any), marks the record READY
+        and, when every VDU of the owning VNFR is active, signals the VNFR.
+        Idempotent: a second call on an already-active VDU only logs a warning.
+        """
+        if self.active:
+            self._log.warning("VDU %s was already marked as active", self._vdur_id)
+            return
+
+        self._log.debug("VDUR id %s in VNFR %s is active", self._vdur_id, self._vnfr.vnfr_id)
+
+        # Only VDUs whose descriptor references a VCS component start one
+        if self._vdud.vcs_component_ref is not None:
+            yield from self.start_component()
+
+        self._state = VDURecordState.READY
+
+        if self._vnfr.all_vdus_active():
+            self._log.debug("Inside vdu_is_active. VNFR is READY. Info: %s", self._vnfr)
+            yield from self._vnfr.is_ready()
+
+    @asyncio.coroutine
+    def instantiate(self, xact, vnfr, config=None):
+        """ Instantiate this VDU """
+        self._state = VDURecordState.INSTANTIATING
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, query_action, ks_path, msg):
+            """ This VDUR is active """
+            self._log.debug("Received VDUR instantiate on_prepare (%s:%s:%s)",
+                            query_action,
+                            ks_path,
+                            msg)
+
+            if (query_action == rwdts.QueryAction.UPDATE or
+                    query_action == rwdts.QueryAction.CREATE):
+                self._vm_resp = msg
+
+                if msg.resource_state == "active":
+                    # Move this VDU to ready state
+                    yield from self.vdu_is_active()
+                elif msg.resource_state == "failed":
+                    yield from self.instantiation_failed(msg.resource_errors)
+            elif query_action == rwdts.QueryAction.DELETE:
+                self._log.debug("DELETE action in on_prepare for VDUR instantiation, ignoring")
+            else:
+                raise NotImplementedError(
+                    "%s action on VirtualDeployementUnitRecord not supported",
+                    query_action)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        try:
+            reg_event = asyncio.Event(loop=self._loop)
+
+            @asyncio.coroutine
+            def on_ready(regh, status):
+                reg_event.set()
+
+            handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare, on_ready=on_ready)
+            self._rm_regh = yield from self._dts.register(self.resmgr_path + '/resource-info',
+                                                          flags=rwdts.Flag.SUBSCRIBER,
+                                                          handler=handler)
+            yield from reg_event.wait()
+
+            vm_resp = yield from self.create_resource(xact, vnfr, config)
+            self._vm_resp = vm_resp
+
+            self._state = VDURecordState.RESOURCE_ALLOC_PENDING
+            self._log.debug("Requested VM from resource manager response %s",
+                            vm_resp)
+            if vm_resp.resource_state == "active":
+                self._log.debug("Resourcemgr responded wih an active vm resp %s",
+                                vm_resp)
+                yield from self.vdu_is_active()
+                self._state = VDURecordState.READY
+            elif (vm_resp.resource_state == "pending" or
+                  vm_resp.resource_state == "inactive"):
+                self._log.debug("Resourcemgr responded wih a pending vm resp %s",
+                                vm_resp)
+                # handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+                # self._rm_regh = yield from self._dts.register(self.resmgr_path + '/resource-info',
+                #                                              flags=rwdts.Flag.SUBSCRIBER,
+                #                                              handler=handler)
+            else:
+                self._log.debug("Resourcemgr responded wih an error vm resp %s",
+                                vm_resp)
+                raise VirtualDeploymentUnitRecordError(
+                    "Failed VDUR instantiation %s " % vm_resp)
+
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            self._log.exception(e)
+            self._log.error("Instantiation of VDU record failed: %s", str(e))
+            self._state = VDURecordState.FAILED
+            yield from self.instantiation_failed(str(e))
+
+
+class VlRecordState(enum.Enum):
+    """ VL Record State """
+    INIT = 101
+    INSTANTIATION_PENDING = 102
+    ACTIVE = 103
+    TERMINATE_PENDING = 104
+    TERMINATED = 105
+    FAILED = 106
+
+
+class InternalVirtualLinkRecord(object):
+    """ Internal Virtual Link record """
+    def __init__(self, dts, log, loop, ivld_msg, vnfr_name, cloud_account_name):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._ivld_msg = ivld_msg
+        self._vnfr_name = vnfr_name
+        self._cloud_account_name = cloud_account_name
+
+        self._vlr_req = self.create_vlr()
+        self._vlr = None
+        self._state = VlRecordState.INIT
+
+    @property
+    def vlr_id(self):
+        """ Find VLR by id """
+        return self._vlr_req.id
+
+    @property
+    def name(self):
+        """ Name of this VL """
+        return self._vnfr_name + "." + self._ivld_msg.name
+
+    @property
+    def network_id(self):
+        """ Find VLR by id """
+        return self._vlr.network_id if self._vlr else None
+
+    def vlr_path(self):
+        """ VLR path for this VLR instance"""
+        return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self.vlr_id)
+
+    def create_vlr(self):
+        """ Create the VLR record which will be instantiated """
+
+        vld_fields = ["short_name",
+                      "vendor",
+                      "description",
+                      "version",
+                      "type_yang",
+                      "provider_network"]
+
+        vld_copy_dict = {k: v for k, v in self._ivld_msg.as_dict().items() if k in vld_fields}
+
+        vlr_dict = {"id": str(uuid.uuid4()),
+                    "name": self.name,
+                    "cloud_account": self._cloud_account_name,
+                    }
+        vlr_dict.update(vld_copy_dict)
+
+        vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict)
+        return vlr
+
+    @asyncio.coroutine
+    def instantiate(self, xact, restart_mode=False):
+        """ Instantiate VL """
+
+        @asyncio.coroutine
+        def instantiate_vlr():
+            """ Instantiate VLR"""
+            self._log.debug("Create VL with xpath %s and vlr %s",
+                            self.vlr_path(), self._vlr_req)
+
+            with self._dts.transaction(flags=0) as xact:
+                block = xact.block_create()
+                block.add_query_create(xpath=self.vlr_path(), msg=self._vlr_req)
+                self._log.debug("Executing VL create path:%s msg:%s",
+                                self.vlr_path(), self._vlr_req)
+
+                res_iter = None
+                try:
+                    res_iter = yield from block.execute()
+                except Exception:
+                    self._state = VlRecordState.FAILED
+                    self._log.exception("Caught exception while instantial VL")
+                    raise
+
+                for ent in res_iter:
+                    res = yield from ent
+                    self._vlr = res.result
+
+            if self._vlr.operational_status == 'failed':
+                self._log.debug("VL creation failed for vlr id %s", self._vlr.id)
+                self._state = VlRecordState.FAILED
+                raise VnfrInstantiationFailed("instantiation due to VL failure %s" % (self._vlr.id))
+
+            self._log.info("Created VL with xpath %s and vlr %s",
+                           self.vlr_path(), self._vlr)
+
+        @asyncio.coroutine
+        def get_vlr():
+            """ Get the network id """
+            res_iter = yield from self._dts.query_read(self.vlr_path(), rwdts.XactFlag.MERGE)
+            vlr = None
+            for ent in res_iter:
+                res = yield from ent
+                vlr = res.result
+
+            if vlr is None:
+                err = "Failed to get VLR for path  %s" % self.vlr_path()
+                self._log.warn(err)
+                raise InternalVirtualLinkRecordError(err)
+            return vlr
+
+        self._state = VlRecordState.INSTANTIATION_PENDING
+
+        if restart_mode:
+            vl = yield from get_vlr()
+            if vl is None:
+                yield from instantiate_vlr()
+        else:
+            yield from instantiate_vlr()
+
+        self._state = VlRecordState.ACTIVE
+
+    def vlr_in_vns(self):
+        """ Is there a VLR record in VNS """
+        if (self._state == VlRecordState.ACTIVE or
+                self._state == VlRecordState.INSTANTIATION_PENDING or
+                self._state == VlRecordState.FAILED):
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def terminate(self, xact):
+        """Terminate this VL """
+        if not self.vlr_in_vns():
+            self._log.debug("Ignoring terminate request for id %s in state %s",
+                            self.vlr_id, self._state)
+            return
+
+        self._log.debug("Terminating VL with path %s", self.vlr_path())
+        self._state = VlRecordState.TERMINATE_PENDING
+        block = xact.block_create()
+        block.add_query_delete(self.vlr_path())
+        yield from block.execute(flags=0, now=True)
+        self._state = VlRecordState.TERMINATED
+        self._log.debug("Terminated VL with path %s", self.vlr_path())
+
+
+class VirtualNetworkFunctionRecord(object):
+    """ Virtual Network Function Record """
+    def __init__(self, dts, log, loop, cluster_name, vnfm, vcs_handler, vnfr_msg):
+        """ Initialize this VNFR from its VNFR message.
+
+        Arguments:
+            dts          - DTS handle used for queries/registrations
+            log          - logger
+            loop         - asyncio event loop
+            cluster_name - VCS cluster this VNF runs in
+            vnfm         - owning VNF manager
+            vcs_handler  - handler used for VCS component records
+            vnfr_msg     - the VNFR message driving this record
+        """
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._cluster_name = cluster_name
+        self._vnfr_msg = vnfr_msg
+        self._vnfr_id = vnfr_msg.id
+        self._vnfd_id = vnfr_msg.vnfd_ref
+        self._vnfm = vnfm
+        self._vcs_handler = vcs_handler
+        self._vnfr = vnfr_msg
+
+        self._vnfd = None                # VNFD wrapper; None until resolved elsewhere
+        self._state = VirtualNetworkFunctionRecordState.INIT
+        self._state_failed_reason = None # set when _state becomes FAILED
+        self._ext_vlrs = {}  # The list of external virtual links
+        self._vlrs = []  # The list of internal virtual links
+        self._vdus = []  # The list of vdu
+        self._vlr_by_cp = {}             # internal CP name -> InternalVirtualLinkRecord
+        self._cprs = []                  # connection point records
+        self._inventory = {}             # mangled component name -> VcsComponent
+        self._create_time = int(time.time())
+        self._vnf_mon = None             # monitoring-params source, if any
+        self._config_status = vnfr_msg.config_status
+        self._vnfd_package_store = rift.package.store.VnfdPackageFilesystemStore(self._log)
+
+    def _get_vdur_from_vdu_id(self, vdu_id):
+        self._log.debug("Finding vdur for vdu_id %s", vdu_id)
+        self._log.debug("Searching through vdus: %s", self._vdus)
+        for vdu in self._vdus:
+            self._log.debug("vdu_id: %s", vdu.vdu_id)
+            if vdu.vdu_id == vdu_id:
+                return vdu
+
+        raise VDURecordNotFound("Could not find vdu record from id: %s", vdu_id)
+
+    @property
+    def operational_status(self):
+        """ Operational status of this VNFR """
+        op_status_map = {"INIT": "init",
+                         "VL_INIT_PHASE": "vl_init_phase",
+                         "VM_INIT_PHASE": "vm_init_phase",
+                         "READY": "running",
+                         "TERMINATE": "terminate",
+                         "VL_TERMINATE_PHASE": "vl_terminate_phase",
+                         "VDU_TERMINATE_PHASE": "vm_terminate_phase",
+                         "TERMINATED": "terminated",
+                         "FAILED": "failed", }
+        return op_status_map[self._state.name]
+
+    @property
+    def vnfd_xpath(self):
+        """ Config xpath of the VNFD associated with this VNFR """
+        return("C,/vnfd:vnfd-catalog/"
+               "vnfd:vnfd[vnfd:id = '{}']".format(self._vnfd_id))
+
+    @property
+    def vnfd(self):
+        """ VNFD for this VNFR (None until resolved) """
+        return self._vnfd
+
+    @property
+    def vnf_name(self):
+        """ VNFD name associated with this VNFR """
+        return self.vnfd.name
+
+    @property
+    def name(self):
+        """ Name of this VNF in the record """
+        return self._vnfr.name
+
+    @property
+    def cloud_account_name(self):
+        """ Name of the cloud account this VNFR is instantiated in """
+        return self._vnfr.cloud_account
+
+    @property
+    def vnfd_id(self):
+        """ VNFD Id associated with this VNFR """
+        return self.vnfd.id
+
+    @property
+    def vnfr_id(self):
+        """ VNFR Id associated with this VNFR """
+        return self._vnfr_id
+
+    @property
+    def member_vnf_index(self):
+        """ Member VNF index associated with this VNFR """
+        return self._vnfr.member_vnf_index_ref
+
+    @property
+    def config_status(self):
+        """ Config agent status for this VNFR """
+        return self._config_status
+
+    def component_by_name(self, component_name):
+        """ Find a component by name in the inventory list.
+
+        The inventory is keyed by the mangled (vnf-name/vnfd-id qualified)
+        name; raises KeyError when the component is not present.
+        """
+        mangled_name = VcsComponent.mangle_name(component_name,
+                                                self.vnf_name,
+                                                self.vnfd_id)
+        return self._inventory[mangled_name]
+
+
+
+    @asyncio.coroutine
+    def get_nsr_config(self):
+        ### Need access to NS instance configuration for runtime resolution.
+        ### This shall be replaced when deployment flavors are implemented
+        xpath = "C,/nsr:ns-instance-config"
+        results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
+
+        for result in results:
+            entry = yield from result
+            ns_instance_config = entry.result
+            for nsr in ns_instance_config.nsr:
+                if nsr.id == self._vnfr_msg.nsr_id_ref:
+                    return nsr
+        return None
+
+    @asyncio.coroutine
+    def start_component(self, component_name, ip_addr):
+        """ Start a component in the VNFR by name.
+
+        Arguments:
+            component_name - unmangled component name from the VNFD
+            ip_addr        - management IP address handed to the component
+        """
+        comp = self.component_by_name(component_name)
+        yield from comp.start(None, None, ip_addr)
+
+    def cp_ip_addr(self, cp_name):
+        """ Get ip address for connection point """
+        self._log.debug("cp_ip_addr()")
+        for cp in self._cprs:
+            if cp.name == cp_name and cp.ip_address is not None:
+                return cp.ip_address
+        return "0.0.0.0"
+
+    def mgmt_intf_info(self):
+        """ Get Management interface info for this VNFR.
+
+        Returns an (ip_addr, port) tuple.  The address is resolved, in order,
+        from: the mgmt connection point, the mgmt VDU's management IP, or the
+        static ip_address configured in the descriptor.  ip_addr may be None
+        when the mgmt VDU has not been found.
+        """
+        mgmt_intf_desc = self.vnfd.msg.mgmt_interface
+        ip_addr = None
+        if mgmt_intf_desc.has_field("cp"):
+            ip_addr = self.cp_ip_addr(mgmt_intf_desc.cp)
+        elif mgmt_intf_desc.has_field("vdu_id"):
+            try:
+                vdur = self._get_vdur_from_vdu_id(mgmt_intf_desc.vdu_id)
+                ip_addr = vdur.management_ip
+            except VDURecordNotFound:
+                self._log.debug("Did not find mgmt interface for vnfr id %s", self._vnfr_id)
+                ip_addr = None
+        else:
+            # Fall back to the statically configured address
+            ip_addr = mgmt_intf_desc.ip_address
+        port = mgmt_intf_desc.port
+
+        return ip_addr, port
+
+    @property
+    def msg(self):
+        """ Build the VNFR operational message for this record.
+
+        Assembles the yang VNFR from descriptor copy-fields, mgmt interface
+        info, internal VLRs, VDURs, connection points, monitoring params and
+        placement group info.  NOTE(review): constructs a fresh message on
+        every access — callers mutating the result (e.g. setting create_time)
+        must publish that same object, not re-read this property.
+        """
+        vnfd_fields = ["short_name", "vendor", "description", "version"]
+        vnfd_copy_dict = {k: v for k, v in self.vnfd.msg.as_dict().items() if k in vnfd_fields}
+
+        mgmt_intf = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MgmtInterface()
+        ip_address, port = self.mgmt_intf_info()
+
+        if ip_address is not None:
+            mgmt_intf.ip_address = ip_address
+        if port is not None:
+            mgmt_intf.port = port
+
+        vnfr_dict = {"id": self._vnfr_id,
+                     "nsr_id_ref": self._vnfr_msg.nsr_id_ref,
+                     "name": self.name,
+                     "member_vnf_index_ref": self.member_vnf_index,
+                     "vnfd_ref": self.vnfd_id,
+                     "operational_status": self.operational_status,
+                     "operational_status_details": self._state_failed_reason,
+                     "cloud_account": self.cloud_account_name,
+                     "config_status": self._config_status
+                     }
+
+        vnfr_dict.update(vnfd_copy_dict)
+
+        vnfr_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+        vnfr_msg.mgmt_interface = mgmt_intf
+
+        # Add all the VLRs  to  VNFR
+        for vlr in self._vlrs:
+            ivlr = vnfr_msg.internal_vlr.add()
+            ivlr.vlr_ref = vlr.vlr_id
+
+        # Add all the VDURs to VDUR
+        if self._vdus is not None:
+            for vdu in self._vdus:
+                vdur = vnfr_msg.vdur.add()
+                vdur.from_dict(vdu.msg.as_dict())
+
+        if self.vnfd.msg.mgmt_interface.has_field('dashboard_params'):
+            vnfr_msg.dashboard_url = self.dashboard_url
+
+        for cpr in self._cprs:
+            new_cp = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr.as_dict())
+            vnfr_msg.connection_point.append(new_cp)
+
+        if self._vnf_mon is not None:
+            for monp in self._vnf_mon.msg:
+                vnfr_msg.monitoring_param.append(
+                    VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict(monp.as_dict()))
+
+        if self._vnfr.vnf_configuration is not None:
+            vnfr_msg.vnf_configuration.from_dict(self._vnfr.vnf_configuration.as_dict())
+            # Default the config-access mgmt IP to the resolved mgmt address
+            if (ip_address is not None and
+                    vnfr_msg.vnf_configuration.config_access.mgmt_ip_address is None):
+                vnfr_msg.vnf_configuration.config_access.mgmt_ip_address = ip_address
+
+        for group in self._vnfr_msg.placement_groups_info:
+            group_info = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_PlacementGroupsInfo()
+            group_info.from_dict(group.as_dict())
+            vnfr_msg.placement_groups_info.append(group_info)
+
+        return vnfr_msg
+
+    @property
+    def dashboard_url(self):
+        ip, cfg_port = self.mgmt_intf_info()
+        protocol = 'http'
+        http_port = 80
+        if self.vnfd.msg.mgmt_interface.dashboard_params.has_field('https'):
+            if self.vnfd.msg.mgmt_interface.dashboard_params.https is True:
+                protocol = 'https'
+                http_port = 443
+        if self.vnfd.msg.mgmt_interface.dashboard_params.has_field('port'):
+            http_port = self.vnfd.msg.mgmt_interface.dashboard_params.port
+
+        url = "{protocol}://{ip_address}:{port}/{path}".format(
+                protocol=protocol,
+                ip_address=ip,
+                port=http_port,
+                path=self.vnfd.msg.mgmt_interface.dashboard_params.path.lstrip("/"),
+                )
+
+        return url
+
+    @property
+    def xpath(self):
+        """ path for this  VNFR """
+        return("D,/vnfr:vnfr-catalog"
+               "/vnfr:vnfr[vnfr:id='{}']".format(self.vnfr_id))
+
+    @asyncio.coroutine
+    def publish(self, xact):
+        """ publish this VNFR """
+        vnfr = self.msg
+        self._log.debug("Publishing VNFR path = [%s], record = [%s]",
+                        self.xpath, self.msg)
+        vnfr.create_time = self._create_time
+        yield from self._vnfm.publish_vnfr(xact, self.xpath, self.msg)
+        self._log.debug("Published VNFR path = [%s], record = [%s]",
+                        self.xpath, self.msg)
+
+    @asyncio.coroutine
+    def create_vls(self):
+        """ Create the internal VL records for this VNF.
+
+        Builds an InternalVirtualLinkRecord per internal VLD and indexes each
+        record by its internal connection point names.  Raises
+        InternalVirtualLinkRecordError if a CP is bound to more than one VL.
+        Note: records are only created here; instantiation happens in
+        instantiate_vls().
+        """
+        self._log.debug("Publishing Internal Virtual Links for vnfd id: %s",
+                        self.vnfd_id)
+        for ivld_msg in self.vnfd.msg.internal_vld:
+            self._log.debug("Creating internal vld:"
+                            " %s, int_cp_ref = %s",
+                            ivld_msg, ivld_msg.internal_connection_point_ref
+                            )
+            vlr = InternalVirtualLinkRecord(dts=self._dts,
+                                            log=self._log,
+                                            loop=self._loop,
+                                            ivld_msg=ivld_msg,
+                                            vnfr_name=self.name,
+                                            cloud_account_name=self.cloud_account_name
+                                            )
+            self._vlrs.append(vlr)
+
+            for int_cp in ivld_msg.internal_connection_point_ref:
+                # A connection point may belong to at most one internal VL
+                if int_cp in self._vlr_by_cp:
+                    msg = ("Connection point %s already "
+                           " bound %s" % (int_cp, self._vlr_by_cp[int_cp]))
+                    raise InternalVirtualLinkRecordError(msg)
+                self._log.debug("Setting vlr %s to internal cp = %s",
+                                vlr, int_cp)
+                self._vlr_by_cp[int_cp] = vlr
+
+    @asyncio.coroutine
+    def instantiate_vls(self, xact, restart_mode=False):
+        """ Instantiate the internal VLs associated with this VNF, one at a
+        time in creation order.  Any VL failure propagates to the caller.
+        """
+        self._log.debug("Instantiating Internal Virtual Links for vnfd id: %s",
+                        self.vnfd_id)
+
+        for vlr in self._vlrs:
+            self._log.debug("Instantiating VLR %s", vlr)
+            yield from vlr.instantiate(xact, restart_mode)
+
+    def find_vlr_by_cp(self, cp_name):
+        """ Find the internal VLR bound to the given connection point name.
+
+        Raises KeyError when the CP is not bound to any internal VL.
+        """
+        return self._vlr_by_cp[cp_name]
+
+    def resolve_placement_group_cloud_construct(self, input_group, nsr_config):
+        """
+        Returns the cloud specific construct for placement group
+        Arguments:
+            input_group: VNFD PlacementGroup
+            nsr_config: Configuration for VNFDGroup MAP in the NSR config
+        """
+        copy_dict = ['name', 'requirement', 'strategy']
+        for group_info in nsr_config.vnfd_placement_group_maps:
+            if group_info.placement_group_ref == input_group.name and \
+               group_info.vnfd_id_ref == self.vnfd_id:
+                group = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
+                group_dict = {k:v for k,v in
+                              group_info.as_dict().items()
+                              if (k != 'placement_group_ref' and k !='vnfd_id_ref')}
+                for param in copy_dict:
+                    group_dict.update({param: getattr(input_group, param)})
+                group.from_dict(group_dict)
+                return group
+        return None
+
+    @asyncio.coroutine
+    def get_vdu_placement_groups(self, vdu):
+        """ Collect the placement groups applying to the given VDU.
+
+        Combines VNF-level placement group info from the VNFR message with
+        VDU-level groups from the VNFD, resolved against the NSR config.
+        """
+        placement_groups = []
+        ### Step-1: Get VNF level placement groups
+        for group in self._vnfr_msg.placement_groups_info:
+            #group_info = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
+            #group_info.from_dict(group.as_dict())
+            placement_groups.append(group)
+
+        ### Step-2: Get NSR config. This is required for resolving placement_groups cloud constructs
+        nsr_config = yield from self.get_nsr_config()
+
+        ### Step-3: Get VDU level placement groups
+        for group in self.vnfd.msg.placement_groups:
+            for member_vdu in group.member_vdus:
+                if member_vdu.member_vdu_ref == vdu.id:
+                    group_info = self.resolve_placement_group_cloud_construct(group,
+                                                                              nsr_config)
+                    if group_info is None:
+                        # Unresolvable groups are skipped (best-effort), not fatal
+                        self._log.info("Could not resolve cloud-construct for placement group: %s", group.name)
+                        ### raise VNFMPlacementGroupError("Could not resolve cloud-construct for placement group: {}".format(group.name))
+                    else:
+                        self._log.info("Successfully resolved cloud construct for placement group: %s for VDU: %s in VNF: %s (Member Index: %s)",
+                                       str(group_info),
+                                       vdu.name,
+                                       self.vnf_name,
+                                       self.member_vnf_index)
+                        placement_groups.append(group_info)
+
+        return placement_groups
+
+    @asyncio.coroutine
+    def create_vdus(self, vnfr, restart_mode=False):
+        """ Create the VDU records associated with this VNF.
+
+        One VirtualDeploymentUnitRecord is created per VDU in the VNFD; in
+        restart mode, existing VDUR ids from the VNFR are reused.
+        """
+
+        def get_vdur_id(vdud):
+            """Get the corresponding VDUR's id for the VDUD. This is useful in
+            case of a restart.
+
+            In restart mode we check for an existing VDUR's ID and use it, if
+            available. This way we don't end up creating duplicate VDURs
+            """
+            vdur_id = None
+
+            if restart_mode and vdud is not None:
+                try:
+                    vdur = [vdur.id for vdur in vnfr._vnfr.vdur if vdur.vdu_id_ref == vdud.id]
+                    vdur_id = vdur[0]
+                except IndexError:
+                    # No prior VDUR for this VDUD; a new id will be generated
+                    self._log.error("Unable to find a VDUR for VDUD {}".format(vdud))
+
+            return vdur_id
+
+
+        self._log.info("Creating VDU's for vnfd id: %s", self.vnfd_id)
+        for vdu in self.vnfd.msg.vdu:
+            self._log.debug("Creating vdu: %s", vdu)
+            vdur_id = get_vdur_id(vdu)
+
+            placement_groups = yield from self.get_vdu_placement_groups(vdu)
+            self._log.info("Launching VDU: %s from VNFD :%s (Member Index: %s) with Placement Groups: %s",
+                           vdu.name,
+                           self.vnf_name,
+                           self.member_vnf_index,
+                           [ group.name for group in placement_groups])
+
+            vdur = VirtualDeploymentUnitRecord(
+                dts=self._dts,
+                log=self._log,
+                loop=self._loop,
+                vdud=vdu,
+                vnfr=vnfr,
+                mgmt_intf=self.has_mgmt_interface(vdu),
+                cloud_account_name=self.cloud_account_name,
+                vnfd_package_store=self._vnfd_package_store,
+                vdur_id=vdur_id,
+                placement_groups = placement_groups,
+                )
+            yield from vdur.vdu_opdata_register()
+
+            self._vdus.append(vdur)
+
+    @asyncio.coroutine
+    def instantiate_vdus(self, xact, vnfr):
+        """ Instantiate the VDUs associated with this VNF.
+
+        Scans each VDU's cloud-init script for "{{ vdu[...] }}" references to
+        build a dependency graph, then launches one instantiation task per
+        VDU; each task busy-waits (1s poll) until its dependencies reach a
+        terminal state before substituting cloud-init variables and
+        instantiating.  NOTE(review): the created tasks are fire-and-forget —
+        this coroutine returns before instantiation completes.
+        """
+        self._log.debug("Instantiating VDU's for vnfd id %s: %s", self.vnfd_id, self._vdus)
+
+        lookup = {vdu.vdu_id: vdu for vdu in self._vdus}
+
+        # Identify any dependencies among the VDUs
+        dependencies = collections.defaultdict(list)
+        vdu_id_pattern = re.compile(r"\{\{ vdu\[([^]]+)\]\S* \}\}")
+
+        for vdu in self._vdus:
+            if vdu.vdud_cloud_init is not None:
+                for vdu_id in vdu_id_pattern.findall(vdu.vdud_cloud_init):
+                    if vdu_id != vdu.vdu_id:
+                        # This means that vdu.vdu_id depends upon vdu_id,
+                        # i.e. vdu_id must be instantiated before
+                        # vdu.vdu_id.
+                        dependencies[vdu.vdu_id].append(lookup[vdu_id])
+
+        # Define the terminal states of VDU instantiation
+        terminal = (
+                VDURecordState.READY,
+                VDURecordState.TERMINATED,
+                VDURecordState.FAILED,
+                )
+
+        datastore = VdurDatastore()
+        processed = set()
+
+        @asyncio.coroutine
+        def instantiate_monitor(vdu):
+            """Monitor the state of the VDU during instantiation
+
+            Arguments:
+                vdu - a VirtualDeploymentUnitRecord
+
+            """
+            # wait for the VDUR to enter a terminal state
+            while vdu._state not in terminal:
+                yield from asyncio.sleep(1, loop=self._loop)
+
+            # update the datastore
+            datastore.update(vdu)
+
+            # add the VDU to the set of processed VDUs
+            processed.add(vdu.vdu_id)
+
+        @asyncio.coroutine
+        def instantiate(vdu):
+            """Instantiate the specified VDU
+
+            Arguments:
+                vdu - a VirtualDeploymentUnitRecord
+
+            Raises:
+                if the VDU, or any of the VDUs this VDU depends upon, are
+                terminated or fail to instantiate properly, a
+                VirtualDeploymentUnitRecordError is raised.
+
+            """
+            for dependency in dependencies[vdu.vdu_id]:
+                self._log.debug("{}: waiting for {}".format(vdu.vdu_id, dependency.vdu_id))
+
+                while dependency.vdu_id not in processed:
+                    yield from asyncio.sleep(1, loop=self._loop)
+
+                if not dependency.active:
+                    raise VirtualDeploymentUnitRecordError()
+
+            self._log.debug('instantiating {}'.format(vdu.vdu_id))
+
+            # Populate the datastore with the current values of the VDU
+            datastore.add(vdu)
+
+            # Substitute any variables contained in the cloud config script
+            config = str(vdu.vdud_cloud_init)
+
+            parts = re.split("\{\{ ([^\}]+) \}\}", config)
+            if len(parts) > 1:
+
+                # Extract the variable names
+                variables = list()
+                for variable in parts[1::2]:
+                    variables.append(variable.lstrip('{{').rstrip('}}').strip())
+
+                # Iterate of the variables and substitute values from the
+                # datastore.
+                for variable in variables:
+
+                    # Handle a reference to a VDU by ID
+                    if variable.startswith('vdu['):
+                        value = datastore.get(variable)
+                        if value is None:
+                            msg = "Unable to find a substitute for {} in {} cloud-init script"
+                            raise ValueError(msg.format(variable, vdu.vdu_id))
+
+                        config = config.replace("{{ %s }}" % variable, value)
+                        continue
+
+                    # Handle a reference to the current VDU
+                    if variable.startswith('vdu'):
+                        value = datastore.get('vdu[{}]'.format(vdu.vdu_id) + variable[3:])
+                        config = config.replace("{{ %s }}" % variable, value)
+                        continue
+
+                    # Handle unrecognized variables
+                    msg = 'unrecognized cloud-config variable: {}'
+                    raise ValueError(msg.format(variable))
+
+            # Instantiate the VDU
+            # NOTE(review): `as xact` shadows this coroutine's xact parameter,
+            # which is otherwise unused here — confirm this is intentional.
+            with self._dts.transaction() as xact:
+                self._log.debug("Instantiating vdu: %s", vdu)
+                yield from vdu.instantiate(xact, vnfr, config=config)
+                if self._state == VirtualNetworkFunctionRecordState.FAILED:
+                    self._log.error("Instatiation of VNF %s failed while instantiating vdu %s",
+                                    self.vnfr_id, vdu)
+
+        # First create a set of tasks to monitor the state of the VDUs and
+        # report when they have entered a terminal state
+        for vdu in self._vdus:
+            self._loop.create_task(instantiate_monitor(vdu))
+
+        for vdu in self._vdus:
+            self._loop.create_task(instantiate(vdu))
+
+    def has_mgmt_interface(self, vdu):
+        # ## TODO: Support additional mgmt_interface type options
+        if self.vnfd.msg.mgmt_interface.vdu_id == vdu.id:
+            return True
+        return False
+
+    def vlr_xpath(self, vlr_id):
+        """ vlr xpath """
+        return(
+            "D,/vlr:vlr-catalog/"
+            "vlr:vlr[vlr:id = '{}']".format(vlr_id))
+
+    def ext_vlr_by_id(self, vlr_id):
+        """ find ext vlr by id """
+        return self._ext_vlrs[vlr_id]
+
    @asyncio.coroutine
    def publish_inventory(self, xact):
        """ Publish the inventory associated with this VNF

        Creates one VcsComponent per component listed in the VNF descriptor
        and publishes each within the given transaction.
        """
        self._log.debug("Publishing inventory for VNFR id: %s", self._vnfr_id)

        for component in self.vnfd.msg.component:
            self._log.debug("Creating inventory component %s", component)
            # Mangle the component name with the VNF name and VNFD id so
            # records from different VNF instances do not collide.
            mangled_name = VcsComponent.mangle_name(component.component_name,
                                                    self.vnf_name,
                                                    self.vnfd_id
                                                    )
            comp = VcsComponent(dts=self._dts,
                                log=self._log,
                                loop=self._loop,
                                cluster_name=self._cluster_name,
                                vcs_handler=self._vcs_handler,
                                component=component,
                                mangled_name=mangled_name,
                                )
            if comp.name in self._inventory:
                # NOTE(review): this early return abandons publishing of all
                # remaining components, not just the duplicate one — confirm
                # whether `continue` was intended here.
                self._log.debug("Duplicate entries in inventory  %s for vnfr %s",
                                component, self._vnfd_id)
                return
            self._log.debug("Adding component %s for vnrf %s",
                            comp.name, self._vnfr_id)
            # Track the component locally, then publish it into DTS
            self._inventory[comp.name] = comp
            yield from comp.publish(xact)
+
+    def all_vdus_active(self):
+        """ Are all VDUS in this VNFR active? """
+        for vdu in self._vdus:
+            if not vdu.active:
+                return False
+
+        self._log.debug("Inside all_vdus_active. Returning True")
+        return True
+
    @asyncio.coroutine
    def instantiation_failed(self, failed_reason=None):
        """ Mark this VNFR as FAILED and publish the updated record.

        Arguments:
            failed_reason - optional human-readable reason stored on the
                            record for operators to inspect
        """
        self._log.debug("VNFR %s instantiation failed ", self.vnfr_id)
        self.set_state(VirtualNetworkFunctionRecordState.FAILED)
        self._state_failed_reason = failed_reason

        # Update the VNFR with the changed status
        yield from self.publish(None)
+
+    @asyncio.coroutine
+    def is_ready(self):
+        """ This VNF is ready"""
+        self._log.debug("VNFR id %s is ready", self.vnfr_id)
+
+        if self._state != VirtualNetworkFunctionRecordState.FAILED:
+            self.set_state(VirtualNetworkFunctionRecordState.READY)
+
+        else:
+            self._log.debug("VNFR id %s ignoring state change", self.vnfr_id)
+
+        # Update the VNFR with the changed status
+        yield from self.publish(None)
+
+    def update_cp(self, cp_name, ip_address, cp_id):
+        """Updated the connection point with ip address"""
+        for cp in self._cprs:
+            if cp.name == cp_name:
+                self._log.debug("Setting ip address and id for cp %s, cpr %s with ip %s id %s",
+                                cp_name, cp, ip_address, cp_id)
+                cp.ip_address = ip_address
+                cp.connection_point_id = cp_id
+                return
+
+        err = "No connection point %s found in VNFR id %s" % (cp.name, self._vnfr_id)
+        self._log.debug(err)
+        raise VirtualDeploymentUnitRecordError(err)
+
+    def set_state(self, state):
+        """ Set state for this VNFR"""
+        self._state = state
+
    @asyncio.coroutine
    def instantiate(self, xact, restart_mode=False):
        """ Instantiate this VNF.

        Fetches the VNFD and the external VLRs referenced by the connection
        points, publishes inventory, creates and instantiates the VLs, then
        creates the VDUs and kicks off their instantiation as a background
        task. The VNFR is re-published after each phase.

        Arguments:
            xact         - transaction used for the publish operations
            restart_mode - True when re-populating state after a restart
        """
        self.set_state(VirtualNetworkFunctionRecordState.VL_INIT_PHASE)

        @asyncio.coroutine
        def fetch_vlrs():
            """ Fetch VLRs """
            # Iterate over all the connection points in VNFR and fetch the
            # associated VLRs

            def cpr_from_cp(cp):
                """ Creates a record level connection point from the desciptor cp"""
                # Only these descriptor fields are carried over to the record
                cp_fields = ["name", "image", "vm-flavor"]
                cp_copy_dict = {k: v for k, v in cp.as_dict().items() if k in cp_fields}
                cpr_dict = {}
                cpr_dict.update(cp_copy_dict)
                return VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr_dict)

            self._log.debug("Fetching VLRs for VNFR id = %s, cps = %s",
                            self._vnfr_id, self._vnfr.connection_point)

            for cp in self._vnfr.connection_point:
                cpr = cpr_from_cp(cp)
                self._cprs.append(cpr)
                self._log.debug("Adding Connection point record  %s ", cp)

                vlr_path = self.vlr_xpath(cp.vlr_ref)
                self._log.debug("Fetching VLR with path = %s", vlr_path)
                res_iter = yield from self._dts.query_read(self.vlr_xpath(cp.vlr_ref),
                                                           rwdts.XactFlag.MERGE)
                # Cache each returned VLR against its reference so it can be
                # looked up later via ext_vlr_by_id()
                for i in res_iter:
                    r = yield from i
                    d = r.result
                    self._ext_vlrs[cp.vlr_ref] = d
                    cpr.vlr_ref = cp.vlr_ref
                    self._log.debug("Fetched VLR [%s] with path = [%s]", d, vlr_path)

        # Fetch the VNFD associated with the VNFR
        self._log.debug("VNFR-ID %s: Fetching vnfds", self._vnfr_id)
        self._vnfd = yield from self._vnfm.get_vnfd_ref(self._vnfd_id)
        self._log.debug("VNFR-ID %s: Fetched vnfd:%s", self._vnfr_id, self._vnfd)

        assert self.vnfd is not None

        # Fetch External VLRs
        self._log.debug("VNFR-ID %s: Fetching vlrs", self._vnfr_id)
        yield from fetch_vlrs()

        # Publish inventory
        self._log.debug("VNFR-ID %s: Publishing Inventory", self._vnfr_id)
        yield from self.publish_inventory(xact)

        # Create the VLs
        self._log.debug("VNFR-ID %s: Creating VLs", self._vnfr_id)
        yield from self.create_vls()

        # publish the VNFR
        self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id)
        yield from self.publish(xact)

        # instantiate VLs; a failure here marks the whole VNFR FAILED
        self._log.debug("VNFR-ID %s: Instantiate VLs", self._vnfr_id)
        try:
            yield from self.instantiate_vls(xact, restart_mode)
        except Exception as e:
            self._log.exception("VL instantiation failed (%s)", str(e))
            yield from self.instantiation_failed(str(e))
            return

        self.set_state(VirtualNetworkFunctionRecordState.VM_INIT_PHASE)

        # instantiate VDUs
        # NOTE(review): `self` is passed as an argument to the bound method
        # create_vdus -- its signature is not visible here; confirm it
        # expects the vnfr as a parameter (instantiate_vdus below does).
        self._log.debug("VNFR-ID %s: Create VDUs", self._vnfr_id)
        yield from self.create_vdus(self, restart_mode)

        # publish the VNFR
        self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id)
        yield from self.publish(xact)

        # instantiate VDUs
        # ToDo: Check if this should be prevented during restart
        # Runs as a background task; instantiate() returns before the VDUs
        # finish coming up.
        self._log.debug("VNFR-ID %s: Instantiate VDUs", self._vnfr_id)
        _ = self._loop.create_task(self.instantiate_vdus(xact, self))

        # publish the VNFR
        self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id)
        yield from self.publish(xact)

        self._log.debug("VNFR-ID %s: Instantiation Done", self._vnfr_id)
+
+    @asyncio.coroutine
+    def terminate(self, xact):
+        """ Terminate this virtual network function """
+
+        self._log.debug("Terminatng VNF id %s", self.vnfr_id)
+
+        self.set_state(VirtualNetworkFunctionRecordState.TERMINATE)
+
+        # stop monitoring
+        if self._vnf_mon is not None:
+            self._vnf_mon.stop()
+            self._vnf_mon.deregister()
+            self._vnf_mon = None
+
+        @asyncio.coroutine
+        def terminate_vls():
+            """ Terminate VLs in this VNF """
+            for vl in self._vlrs:
+                yield from vl.terminate(xact)
+
+        @asyncio.coroutine
+        def terminate_vdus():
+            """ Terminate VDUS in this VNF """
+            for vdu in self._vdus:
+                yield from vdu.terminate(xact)
+
+        self._log.debug("Terminatng VLs in VNF id %s", self.vnfr_id)
+        self.set_state(VirtualNetworkFunctionRecordState.VL_TERMINATE_PHASE)
+        yield from terminate_vls()
+
+        self._log.debug("Terminatng VDUs in VNF id %s", self.vnfr_id)
+        self.set_state(VirtualNetworkFunctionRecordState.VDU_TERMINATE_PHASE)
+        yield from terminate_vdus()
+
+        self._log.debug("Terminated  VNF id %s", self.vnfr_id)
+        self.set_state(VirtualNetworkFunctionRecordState.TERMINATED)
+
+
class VnfdDtsHandler(object):
    """ DTS handler for VNFD config changes """
    # Config path for VNFD catalog entries
    XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"

    def __init__(self, dts, log, loop, vnfm):
        self._dts = dts
        self._log = log
        self._loop = loop
        self._vnfm = vnfm
        self._regh = None  # appconf registration handle, set in register()

    @asyncio.coroutine
    def regh(self):
        """ DTS registration handle

        NOTE(review): decorated with @asyncio.coroutine although it contains
        no yield, and sibling handlers expose `regh` as a plain @property --
        confirm whether callers expect a coroutine here.
        """
        return self._regh

    @asyncio.coroutine
    def register(self):
        """ Register for VNFD configuration"""

        def on_apply(dts, acg, xact, action, scratch):
            """Apply the  configuration

            Creates/updates a VNFD record for every config element whose id
            was stashed in `scratch` by on_prepare, or for every cached
            element when recovering after a restart.
            """
            self._log.debug("Got VNFM VNFD apply (xact: %s) (action: %s)(scr: %s)",
                            xact, action, scratch)

            # An INSTALL action with no transaction means cached config is
            # being re-applied after a restart.
            is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
            # Create/Update a VNFD record
            for cfg in self._regh.get_xact_elements(xact):
                # Only interested in those VNFD cfgs whose ID was received in prepare callback
                if cfg.id in scratch.get('vnfds', []) or is_recovery:
                    self._vnfm.update_vnfd(cfg)

            # Clear the collected ids for the next transaction
            scratch.pop('vnfds', None)

        @asyncio.coroutine
        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
            """ on prepare callback

            Deletes are applied immediately here; adds/updates are deferred
            to on_apply by stashing their ids in `scratch`. Either way, a
            VNFD still referenced by a VNFR may not be changed.
            """
            self._log.debug("Got on prepare for VNFD (path: %s) (action: %s)",
                            ks_path.to_xpath(RwVnfmYang.get_schema()), msg)
            fref = ProtobufC.FieldReference.alloc()
            fref.goto_whole_message(msg.to_pbcm())

            # Handle deletes in prepare_callback, but adds/updates in apply_callback
            if fref.is_field_deleted():
                # Delete an VNFD record
                self._log.debug("Deleting VNFD with id %s", msg.id)
                if self._vnfm.vnfd_in_use(msg.id):
                    self._log.debug("Cannot delete VNFD in use - %s", msg)
                    err = "Cannot delete a VNFD in use - %s" % msg
                    raise VirtualNetworkFunctionDescriptorRefCountExists(err)
                # Delete a VNFD record
                yield from self._vnfm.delete_vnfd(msg.id)
            else:
                # Handle actual adds/updates in apply_callback,
                # just check if VNFD in use in prepare_callback
                if self._vnfm.vnfd_in_use(msg.id):
                    self._log.debug("Cannot modify an VNFD in use - %s", msg)
                    err = "Cannot modify an VNFD in use - %s" % msg
                    raise VirtualNetworkFunctionDescriptorRefCountExists(err)

                # Add this VNFD to scratch to create/update in apply callback
                vnfds = scratch.setdefault('vnfds', [])
                vnfds.append(msg.id)

            xact_info.respond_xpath(rwdts.XactRspCode.ACK)

        self._log.debug(
            "Registering for VNFD config using xpath: %s",
            VnfdDtsHandler.XPATH,
            )
        acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
        with self._dts.appconf_group_create(handler=acg_hdl) as acg:
            self._regh = acg.register(
                xpath=VnfdDtsHandler.XPATH,
                flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
                on_prepare=on_prepare)
+
+
class VcsComponentDtsHandler(object):
    """ Publishes VCS component records into the operational inventory. """

    # DTS path under which VCS component records are published
    XPATH = ("D,/rw-manifest:manifest" +
             "/rw-manifest:operational-inventory" +
             "/rw-manifest:component")

    def __init__(self, dts, log, loop, vnfm):
        self._dts = dts
        self._log = log
        self._loop = loop
        self._vnfm = vnfm
        self._regh = None

    @property
    def regh(self):
        """ DTS registration handle """
        return self._regh

    @asyncio.coroutine
    def register(self):
        """ Register this handler as a DTS publisher for the VCS component path. """
        self._log.debug("VCS Comp publisher DTS handler registering path %s",
                        VcsComponentDtsHandler.XPATH)

        reg_handler = rift.tasklets.DTS.RegistrationHandler()
        grp_handler = rift.tasklets.Group.Handler()
        with self._dts.group_create(handler=grp_handler) as group:
            self._regh = group.register(
                xpath=VcsComponentDtsHandler.XPATH,
                handler=reg_handler,
                flags=(rwdts.Flag.PUBLISHER |
                       rwdts.Flag.NO_PREP_READ |
                       rwdts.Flag.DATASTORE),)

    @asyncio.coroutine
    def publish(self, xact, path, msg):
        """ Publish a VCS component record at the given path. """
        self._log.debug("Publishing the VcsComponent xact = %s, %s:%s",
                        xact, path, msg)
        self.regh.create_element(path, msg)
        self._log.debug("Published the VcsComponent to %s xact = %s, %s:%s",
                        VcsComponentDtsHandler.XPATH, xact, path, msg)
+
class VnfrConsoleOperdataDtsHandler(object):
    """ registers 'D,/vnfr:vnfr-console/vnfr:vnfr[id]/vdur[id]' and handles CRUD from DTS"""

    def __init__(self, dts, log, loop, vnfm, vnfr_id, vdur_id, vdu_id):
        self._dts = dts
        self._log = log
        self._loop = loop
        self._regh = None
        self._vnfm = vnfm

        self._vnfr_id = vnfr_id
        self._vdur_id = vdur_id
        self._vdu_id = vdu_id

    @property
    def vnfr_vdu_console_xpath(self):
        """ path for resource-mgr"""
        return ("D,/rw-vnfr:vnfr-console/rw-vnfr:vnfr[rw-vnfr:id='{}']/rw-vnfr:vdur[vnfr:id='{}']".format(self._vnfr_id,self._vdur_id))

    @asyncio.coroutine
    def register(self):
        """ Register for VNFR VDU Operational Data read from dts """

        @asyncio.coroutine
        def on_prepare(xact_info, action, ks_path, msg):
            """ prepare callback from dts """
            xpath = ks_path.to_xpath(RwVnfrYang.get_schema())
            self._log.debug(
                "Got VNFR VDU Opdata xact_info: %s, action: %s): %s:%s",
                xact_info, action, xpath, msg
                )

            # Only READ is served on this path; everything else is ACKed away
            if action != rwdts.QueryAction.READ:
                self._log.error("Not supported operation %s" % action)
                xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK)
                return

            schema = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur.schema()
            path_entry = schema.keyspec_to_entry(ks_path)
            self._log.debug("VDU Opdata path is {}".format(path_entry))

            try:
                vnfr = self._vnfm.get_vnfr(self._vnfr_id)
            except VnfRecordError:
                # Unknown VNFR: nothing to report, finish the query
                self._log.error("VNFR id %s not found", self._vnfr_id)
                xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK)
                return

            try:
                vdur = vnfr._get_vdur_from_vdu_id(self._vdu_id)
                if not vdur._state == VDURecordState.READY:
                    # The console URL is only readable once the VDU is up
                    self._log.debug("VDUR state is not READY. current state is {}".format(vdur._state))
                    xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK)
                    return
                with self._dts.transaction() as read_xact:
                    resp = yield from vdur.read_resource(read_xact)
                    vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
                    vdur_console.id = self._vdur_id
                    if resp.console_url:
                        vdur_console.console_url = resp.console_url
                    else:
                        vdur_console.console_url = 'none'
                    self._log.debug("Recevied console URL for vdu {} is {}".format(self._vdu_id,vdur_console))
            except Exception:
                # Best effort: report 'none' rather than failing the query
                self._log.exception("Caught exception while reading VDU %s", self._vdu_id)
                vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
                vdur_console.id = self._vdur_id
                vdur_console.console_url = 'none'

            xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK,
                                    xpath=self.vnfr_vdu_console_xpath,
                                    msg=vdur_console)

        self._log.debug("Registering for VNFR VDU using xpath: %s",
                        self.vnfr_vdu_console_xpath)
        reg_handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
        with self._dts.group_create() as group:
            self._regh = group.register(xpath=self.vnfr_vdu_console_xpath,
                                        handler=reg_handler,
                                        flags=rwdts.Flag.PUBLISHER,
                                        )
+
+
class VnfrDtsHandler(object):
    """ registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' and handles CRUD from DTS"""
    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"

    def __init__(self, dts, log, loop, vnfm):
        self._dts = dts
        self._log = log
        self._loop = loop
        self._vnfm = vnfm

        self._regh = None  # set once register() has run

    @property
    def regh(self):
        """ Return registration handle"""
        return self._regh

    @property
    def vnfm(self):
        """ Return VNF manager instance """
        return self._vnfm

    @asyncio.coroutine
    def register(self):
        """ Register for vnfr create/update/delete/read requests from dts """
        def on_commit(xact_info):
            """ The transaction has been committed """
            self._log.debug("Got vnfr commit (xact_info: %s)", xact_info)
            return rwdts.MemberRspCode.ACTION_OK

        def on_abort(*args):
            """ Abort callback """
            self._log.debug("VNF  transaction got aborted")

        @asyncio.coroutine
        def on_event(dts, g_reg, xact, xact_event, scratch_data):
            """ Group event callback.

            On INSTALL (restart recovery) re-create every cached VNFR and
            re-instantiate it in restart mode as a background task.
            """

            @asyncio.coroutine
            def instantiate_realloc_vnfr(vnfr):
                """Re-populate the vnfm after restart

                Arguments:
                    vnfr - the restored VNFR to instantiate in restart mode

                """

                yield from vnfr.instantiate(None, restart_mode=True)

            if xact_event == rwdts.MemberEvent.INSTALL:
                curr_cfg = self.regh.elements
                for cfg in curr_cfg:
                    vnfr = self.vnfm.create_vnfr(cfg)
                    self._loop.create_task(instantiate_realloc_vnfr(vnfr))

            self._log.debug("Got on_event in vnfm")

            return rwdts.MemberRspCode.ACTION_OK

        @asyncio.coroutine
        def on_prepare(xact_info, action, ks_path, msg):
            """ prepare callback from dts

            Dispatches CREATE/DELETE/UPDATE on the VNFR path; any other
            action raises NotImplementedError.
            """
            self._log.debug(
                "Got vnfr on_prepare callback (xact_info: %s, action: %s): %s",
                xact_info, action, msg
                )

            if action == rwdts.QueryAction.CREATE:
                if not msg.has_field("vnfd_ref"):
                    err = "Vnfd reference not provided"
                    self._log.error(err)
                    raise VnfRecordError(err)

                vnfr = self.vnfm.create_vnfr(msg)
                try:
                    # RIFT-9105: Unable to add a READ query under an existing transaction
                    # xact = xact_info.xact
                    yield from vnfr.instantiate(None)
                except Exception as e:
                    # Mark the record FAILED and publish so clients see it
                    self._log.exception(e)
                    self._log.error("Error while instantiating vnfr:%s", vnfr.vnfr_id)
                    vnfr.set_state(VirtualNetworkFunctionRecordState.FAILED)
                    yield from vnfr.publish(None)
            elif action == rwdts.QueryAction.DELETE:
                schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
                path_entry = schema.keyspec_to_entry(ks_path)
                vnfr = self._vnfm.get_vnfr(path_entry.key00.id)

                if vnfr is None:
                    self._log.debug("VNFR id %s not found for delete", path_entry.key00.id)
                    # NOTE(review): args are passed unformatted; confirm the
                    # exception class interpolates them itself.
                    raise VirtualNetworkFunctionRecordNotFound(
                        "VNFR id %s", path_entry.key00.id)

                try:
                    yield from vnfr.terminate(xact_info.xact)
                    # Unref the VNFD
                    vnfr.vnfd.unref()
                    yield from self._vnfm.delete_vnfr(xact_info.xact, vnfr)
                except Exception as e:
                    self._log.exception(e)
                    self._log.error("Caught exception while deleting vnfr %s", path_entry.key00.id)

            elif action == rwdts.QueryAction.UPDATE:
                schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
                path_entry = schema.keyspec_to_entry(ks_path)
                vnfr = None
                try:
                    vnfr = self._vnfm.get_vnfr(path_entry.key00.id)
                except Exception as e:
                    self._log.debug("No vnfr found with id %s", path_entry.key00.id)
                    xact_info.respond_xpath(rwdts.XactRspCode.NA)
                    return

                if vnfr is None:
                    self._log.debug("VNFR id %s not found for update", path_entry.key00.id)
                    xact_info.respond_xpath(rwdts.XactRspCode.NA)
                    return

                self._log.debug("VNFR {} update config status {} (current {})".
                                format(vnfr.name, msg.config_status, vnfr.config_status))
                # Update the config status and publish
                vnfr._config_status = msg.config_status
                yield from vnfr.publish(None)

            else:
                # Bug fix: NotImplementedError does not apply printf-style
                # formatting to extra args, so the message must be built
                # with explicit interpolation.
                raise NotImplementedError(
                    "%s action on VirtualNetworkFunctionRecord not supported" %
                    action)

            xact_info.respond_xpath(rwdts.XactRspCode.ACK)

        self._log.debug("Registering for VNFR using xpath: %s",
                        VnfrDtsHandler.XPATH,)

        hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit,
                                                    on_prepare=on_prepare,)
        handlers = rift.tasklets.Group.Handler(on_event=on_event,)
        with self._dts.group_create(handler=handlers) as group:
            self._regh = group.register(xpath=VnfrDtsHandler.XPATH,
                                        handler=hdl,
                                        flags=(rwdts.Flag.PUBLISHER |
                                               rwdts.Flag.NO_PREP_READ |
                                               rwdts.Flag.CACHE |
                                               rwdts.Flag.DATASTORE),)

    @asyncio.coroutine
    def create(self, xact, path, msg):
        """
        Create a VNFR record in DTS with path and message
        """
        self._log.debug("Creating VNFR xact = %s, %s:%s",
                        xact, path, msg)

        self.regh.create_element(path, msg)
        self._log.debug("Created VNFR xact = %s, %s:%s",
                        xact, path, msg)

    @asyncio.coroutine
    def update(self, xact, path, msg):
        """
        Update a VNFR record in DTS with path and message
        """
        self._log.debug("Updating VNFR xact = %s, %s:%s",
                        xact, path, msg)
        self.regh.update_element(path, msg)
        self._log.debug("Updated VNFR xact = %s, %s:%s",
                        xact, path, msg)

    @asyncio.coroutine
    def delete(self, xact, path):
        """
        Delete a VNFR record in DTS with path and message
        """
        self._log.debug("Deleting VNFR xact = %s, %s", xact, path)
        self.regh.delete_element(path)
        self._log.debug("Deleted VNFR xact = %s, %s", xact, path)
+
+
class VirtualNetworkFunctionDescriptor(object):
    """
    Virtual Network Function descriptor class

    Wraps a VNFD message and reference-counts its use by VNFRs so that a
    descriptor cannot be updated or deleted while still referenced.
    """

    def __init__(self, dts, log, loop, vnfm, vnfd):
        self._dts = dts
        self._log = log
        self._loop = loop

        self._vnfm = vnfm
        self._vnfd = vnfd        # the VNFD message
        self._ref_count = 0      # number of VNFRs referencing this descriptor

    @property
    def ref_count(self):
        """ Returns the reference count associated with
            this Virtual Network Function Descriptor"""
        return self._ref_count

    @property
    def id(self):
        """ Returns vnfd id """
        return self._vnfd.id

    @property
    def name(self):
        """ Returns vnfd name """
        return self._vnfd.name

    def in_use(self):
        """ Returns whether vnfd is in use or not """
        return self._ref_count > 0

    def ref(self):
        """ Take a reference on this object; returns the new count """
        self._ref_count += 1
        return self._ref_count

    def unref(self):
        """ Release a reference on this object; returns the new count

        Raises:
            VnfRecordError if the reference count is already zero.
        """
        if self.ref_count < 1:
            msg = ("Unref on a VNFD object - vnfd id %s, ref_count = %s" %
                   (self.id, self._ref_count))
            self._log.critical(msg)
            raise VnfRecordError(msg)
        self._log.debug("Releasing ref on VNFD %s - curr ref_count:%s",
                        self.id, self.ref_count)
        self._ref_count -= 1
        return self._ref_count

    @property
    def msg(self):
        """ Return the message associated with this NetworkServiceDescriptor"""
        return self._vnfd

    @staticmethod
    def path_for_id(vnfd_id):
        """ Return path for the passed vnfd_id"""
        return "C,/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = '{}']".format(vnfd_id)

    def path(self):
        """ Return the path associated with this NetworkServiceDescriptor"""
        return VirtualNetworkFunctionDescriptor.path_for_id(self.id)

    def update(self, vnfd):
        """ Update the Virtual Network Function Descriptor

        Raises:
            VirtualNetworkFunctionDescriptorRefCountExists if any VNFR
            still references this descriptor.
        """
        if self.in_use():
            self._log.error("Cannot update descriptor %s in use refcnt=%d",
                            self.id, self.ref_count)

            # The following loop is  added to debug RIFT-13284
            for vnf_rec in self._vnfm._vnfrs.values():
                if vnf_rec.vnfd_id == self.id:
                    self._log.error("descriptor %s in used by %s:%s",
                                    self.id, vnf_rec.vnfr_id, vnf_rec.msg)
            raise VirtualNetworkFunctionDescriptorRefCountExists("Cannot update descriptor in use %s" % self.id)
        self._vnfd = vnfd

    def delete(self):
        """ Delete the Virtual Network Function Descriptor

        Raises:
            VirtualNetworkFunctionDescriptorRefCountExists if any VNFR
            still references this descriptor.
        """
        if self.in_use():
            # Bug fix: the format string has two placeholders but only one
            # argument was supplied, which triggers a logging formatting
            # error at runtime; pass ref_count as the %d argument.
            self._log.error("Cannot delete descriptor %s in use refcnt=%d",
                            self.id, self.ref_count)

            # The following loop is  added to debug RIFT-13284
            for vnf_rec in self._vnfm._vnfrs.values():
                if vnf_rec.vnfd_id == self.id:
                    self._log.error("descriptor %s in used by %s:%s",
                                    self.id, vnf_rec.vnfr_id, vnf_rec.msg)
            raise VirtualNetworkFunctionDescriptorRefCountExists("Cannot delete descriptor in use %s" % self.id)
        self._vnfm.delete_vnfd(self.id)
+
+
class VnfdRefCountDtsHandler(object):
    """ Serves read queries for per-VNFD reference counts. """

    # DTS path registered by this handler
    XPATH = "D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count"

    def __init__(self, dts, log, loop, vnfm):
        self._dts = dts
        self._log = log
        self._loop = loop
        self._vnfm = vnfm

        self._regh = None

    @property
    def regh(self):
        """ Return registration handle """
        return self._regh

    @property
    def vnfm(self):
        """ Return the NS manager instance """
        return self._vnfm

    @asyncio.coroutine
    def register(self):
        """ Register for VNFD ref count read from dts """

        @asyncio.coroutine
        def on_prepare(xact_info, action, ks_path, msg):
            """ prepare callback from dts """
            xpath = ks_path.to_xpath(RwVnfrYang.get_schema())
            self._log.debug(
                "Got VNFD ref count get xact_info: %s, action: %s): %s:%s",
                xact_info, action, xpath, msg
                )

            # Only READ queries are served on this path
            if action != rwdts.QueryAction.READ:
                raise VnfRecordError("Not supported operation %s" % action)

            schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount.schema()
            path_entry = schema.keyspec_to_entry(ks_path)
            vnfd_list = yield from self._vnfm.get_vnfd_refcount(path_entry.key00.vnfd_id_ref)
            # Stream each record with MORE, then terminate with a final ACK
            for ref_xpath, ref_msg in vnfd_list:
                self._log.debug("Responding to ref count query path:%s, msg:%s",
                                ref_xpath, ref_msg)
                xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.MORE,
                                        xpath=ref_xpath,
                                        msg=ref_msg)
            xact_info.respond_xpath(rwdts.XactRspCode.ACK)

        reg_handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
        with self._dts.group_create() as group:
            self._regh = group.register(xpath=VnfdRefCountDtsHandler.XPATH,
                                        handler=reg_handler,
                                        flags=rwdts.Flag.PUBLISHER,
                                        )
+
+
+class VdurDatastore(object):
+    """
+    This VdurDatastore is intended to expose select information about a VDUR
+    such that it can be referenced in a cloud config file. The data that is
+    exposed does not necessarily follow the structure of the data in the yang
+    model. This is intentional. The data that are exposed are intended to be
+    agnostic of the yang model so that changes in the model do not necessarily
+    require changes to the interface provided to the user. It also means that
+    the user does not need to be familiar with the RIFT.ware yang models.
+    """
+
+    def __init__(self):
+        """Create an instance of VdurDatastore"""
+        self._vdur_data = dict()
+        self._pattern = re.compile("vdu\[([^]]+)\]\.(.+)")
+
+    def add(self, vdur):
+        """Add a new VDUR to the datastore
+
+        Arguments:
+            vdur - a VirtualDeploymentUnitRecord instance
+
+        Raises:
+            A ValueError is raised if the VDUR is (1) None or (2) already in
+            the datastore.
+
+        """
+        if vdur.vdu_id is None:
+            raise ValueError('VDURs are required to have an ID')
+
+        if vdur.vdu_id in self._vdur_data:
+            raise ValueError('cannot add a VDUR more than once')
+
+        self._vdur_data[vdur.vdu_id] = dict()
+
+        def set_if_not_none(key, attr):
+            if attr is not None:
+                self._vdur_data[vdur.vdu_id][key] = attr
+
+        set_if_not_none('name', vdur._vdud.name)
+        set_if_not_none('mgmt.ip', vdur.vm_management_ip)
+
+    def update(self, vdur):
+        """Update the VDUR information in the datastore
+
+        Arguments:
+            vdur - a GI representation of a VDUR
+
+        Raises:
+            A ValueError is raised if the VDUR is (1) None or (2) already in
+            the datastore.
+
+        """
+        if vdur.vdu_id is None:
+            raise ValueError('VNFDs are required to have an ID')
+
+        if vdur.vdu_id not in self._vdur_data:
+            raise ValueError('VNF is not recognized')
+
+        def set_or_delete(key, attr):
+            if attr is None:
+                if key in self._vdur_data[vdur.vdu_id]:
+                    del self._vdur_data[vdur.vdu_id][key]
+
+            else:
+                self._vdur_data[vdur.vdu_id][key] = attr
+
+        set_or_delete('name', vdur._vdud.name)
+        set_or_delete('mgmt.ip', vdur.vm_management_ip)
+
+    def remove(self, vdur_id):
+        """Remove all of the data associated with specified VDUR
+
+        Arguments:
+            vdur_id - the identifier of a VNFD in the datastore
+
+        Raises:
+            A ValueError is raised if the VDUR is not contained in the
+            datastore.
+
+        """
+        if vdur_id not in self._vdur_data:
+            raise ValueError('VNF is not recognized')
+
+        del self._vdur_data[vdur_id]
+
+    def get(self, expr):
+        """Retrieve VDUR information from the datastore
+
+        An expression should be of the form,
+
+            vdu[<id>].<attr>
+
+        where <id> is the VDUR ID (an unquoted UUID), and <attr> is the name of
+        the exposed attribute that the user wishes to retrieve.
+
+        If the requested data is not available, None is returned.
+
+        Arguments:
+            expr - a string that specifies the data to return
+
+        Raises:
+            A ValueError is raised if the provided expression cannot be parsed.
+
+        Returns:
+            The requested data or None
+
+        """
+        result = self._pattern.match(expr)
+        if result is None:
+            raise ValueError('data expression not recognized ({})'.format(expr))
+
+        vdur_id, key = result.groups()
+
+        if vdur_id not in self._vdur_data:
+            return None
+
+        return self._vdur_data[vdur_id].get(key, None)
+
+
+class VnfManager(object):
+    """ The virtual network function manager class """
+    def __init__(self, dts, log, loop, cluster_name):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._cluster_name = cluster_name
+
+        self._vcs_handler = VcsComponentDtsHandler(dts, log, loop, self)
+        self._vnfr_handler = VnfrDtsHandler(dts, log, loop, self)
+
+        self._dts_handlers = [VnfdDtsHandler(dts, log, loop, self),
+                              self._vnfr_handler,
+                              self._vcs_handler,
+                              VnfdRefCountDtsHandler(dts, log, loop, self)]
+        self._vnfrs = {}
+        self._vnfds = {}
+
+    @property
+    def vnfr_handler(self):
+        """ VNFR dts handler """
+        return self._vnfr_handler
+
+    @property
+    def vcs_handler(self):
+        """ VCS dts handler """
+        return self._vcs_handler
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register all static DTS handlers """
+        for hdl in self._dts_handlers:
+            yield from hdl.register()
+
+    @asyncio.coroutine
+    def run(self):
+        """ Run this VNFM instance """
+        self._log.debug("Run VNFManager - registering static DTS handlers""")
+        yield from self.register()
+
+    def get_vnfr(self, vnfr_id):
+        """ get VNFR by vnfr id """
+
+        if vnfr_id not in self._vnfrs:
+            raise VnfRecordError("VNFR id %s not found", vnfr_id)
+
+        return self._vnfrs[vnfr_id]
+
+    def create_vnfr(self, vnfr):
+        """ Create a VNFR instance """
+        if vnfr.id in self._vnfrs:
+            msg = "Vnfr id %s already exists" % vnfr.id
+            self._log.error(msg)
+            raise VnfRecordError(msg)
+
+        self._log.info("Create VirtualNetworkFunctionRecord %s from vnfd_id: %s",
+                       vnfr.id,
+                       vnfr.vnfd_ref)
+
+        self._vnfrs[vnfr.id] = VirtualNetworkFunctionRecord(
+            self._dts, self._log, self._loop, self._cluster_name, self, self.vcs_handler, vnfr
+            )
+        return self._vnfrs[vnfr.id]
+
+    @asyncio.coroutine
+    def delete_vnfr(self, xact, vnfr):
+        """ Create a VNFR instance """
+        if vnfr.vnfr_id in self._vnfrs:
+            self._log.debug("Deleting VNFR id %s", vnfr.vnfr_id)
+            yield from self._vnfr_handler.delete(xact, vnfr.xpath)
+            del self._vnfrs[vnfr.vnfr_id]
+
+    @asyncio.coroutine
+    def fetch_vnfd(self, vnfd_id):
+        """ Fetch VNFDs based with the vnfd id"""
+        vnfd_path = VirtualNetworkFunctionDescriptor.path_for_id(vnfd_id)
+        self._log.debug("Fetch vnfd with path %s", vnfd_path)
+        vnfd = None
+
+        res_iter = yield from self._dts.query_read(vnfd_path, rwdts.XactFlag.MERGE)
+
+        for ent in res_iter:
+            res = yield from ent
+            vnfd = res.result
+
+        if vnfd is None:
+            err = "Failed to get  Vnfd %s" % vnfd_id
+            self._log.error(err)
+            raise VnfRecordError(err)
+
+        self._log.debug("Fetched vnfd for path %s, vnfd - %s", vnfd_path, vnfd)
+
+        return vnfd
+
+    @asyncio.coroutine
+    def get_vnfd_ref(self, vnfd_id):
+        """ Get Virtual Network Function descriptor for the passed vnfd_id"""
+        vnfd = yield from self.get_vnfd(vnfd_id)
+        vnfd.ref()
+        return vnfd
+
+    @asyncio.coroutine
+    def get_vnfd(self, vnfd_id):
+        """ Get Virtual Network Function descriptor for the passed vnfd_id"""
+        vnfd = None
+        if vnfd_id not in self._vnfds:
+            self._log.error("Cannot find VNFD id:%s", vnfd_id)
+            vnfd = yield from self.fetch_vnfd(vnfd_id)
+
+            if vnfd is None:
+                self._log.error("Cannot find VNFD id:%s", vnfd_id)
+                raise VirtualNetworkFunctionDescriptorError("Cannot find VNFD id:%s", vnfd_id)
+
+            if vnfd.id != vnfd_id:
+                self._log.error("Bad Recovery state {} found for {}".format(vnfd.id, vnfd_id))
+                raise VirtualNetworkFunctionDescriptorError("Bad Recovery state {} found for {}".format(vnfd.id, vnfd_id))
+
+            if vnfd.id not in self._vnfds:
+                self.create_vnfd(vnfd)
+
+        return self._vnfds[vnfd_id]
+
+    def vnfd_in_use(self, vnfd_id):
+        """ Is this VNFD in use """
+        self._log.debug("Is this VNFD in use - msg:%s", vnfd_id)
+        if vnfd_id in self._vnfds:
+            return self._vnfds[vnfd_id].in_use()
+        return False
+
+    @asyncio.coroutine
+    def publish_vnfr(self, xact, path, msg):
+        """ Publish a VNFR """
+        self._log.debug("publish_vnfr called with path %s, msg %s",
+                        path, msg)
+        yield from self.vnfr_handler.update(xact, path, msg)
+
+    def create_vnfd(self, vnfd):
+        """ Create a virtual network function descriptor """
+        self._log.debug("Create virtual networkfunction descriptor - %s", vnfd)
+        if vnfd.id in self._vnfds:
+            self._log.error("Cannot create VNFD %s -VNFD id already exists", vnfd)
+            raise VirtualNetworkFunctionDescriptorError("VNFD already exists-%s", vnfd.id)
+
+        self._vnfds[vnfd.id] = VirtualNetworkFunctionDescriptor(self._dts,
+                                                                self._log,
+                                                                self._loop,
+                                                                self,
+                                                                vnfd)
+        return self._vnfds[vnfd.id]
+
+    def update_vnfd(self, vnfd):
+        """ update the Virtual Network Function descriptor """
+        self._log.debug("Update virtual network function descriptor - %s", vnfd)
+
+        # Hack to remove duplicates from leaf-lists - to be fixed by RIFT-6511
+        for ivld in vnfd.internal_vld:
+            ivld.internal_connection_point_ref = list(set(ivld.internal_connection_point_ref))
+
+        if vnfd.id not in self._vnfds:
+            self._log.debug("No VNFD found - creating VNFD id = %s", vnfd.id)
+            self.create_vnfd(vnfd)
+        else:
+            self._log.debug("Updating VNFD id = %s, vnfd = %s", vnfd.id, vnfd)
+            self._vnfds[vnfd.id].update(vnfd)
+
+    @asyncio.coroutine
+    def delete_vnfd(self, vnfd_id):
+        """ Delete the Virtual Network Function descriptor with the passed id """
+        self._log.debug("Deleting the virtual network function descriptor - %s", vnfd_id)
+        if vnfd_id not in self._vnfds:
+            self._log.debug("Delete VNFD failed - cannot find vnfd-id %s", vnfd_id)
+            raise VirtualNetworkFunctionDescriptorNotFound("Cannot find %s", vnfd_id)
+
+        if self._vnfds[vnfd_id].in_use():
+            self._log.debug("Cannot delete VNFD id %s reference exists %s",
+                            vnfd_id,
+                            self._vnfds[vnfd_id].ref_count)
+            raise VirtualNetworkFunctionDescriptorRefCountExists(
+                "Cannot delete :%s, ref_count:%s",
+                vnfd_id,
+                self._vnfds[vnfd_id].ref_count)
+
+        # Remove any files uploaded with VNFD and stored under $RIFT_ARTIFACTS/libs/<id>
+        try:
+            rift_artifacts_dir = os.environ['RIFT_ARTIFACTS']
+            vnfd_dir = os.path.join(rift_artifacts_dir, 'launchpad/libs', vnfd_id)
+            if os.path.exists(vnfd_dir):
+                shutil.rmtree(vnfd_dir, ignore_errors=True)
+        except Exception as e:
+            self._log.error("Exception in cleaning up VNFD {}: {}".
+                            format(self._vnfds[vnfd_id].name, e))
+            self._log.exception(e)
+
+        del self._vnfds[vnfd_id]
+
+    def vnfd_refcount_xpath(self, vnfd_id):
+        """ xpath for ref count entry """
+        return (VnfdRefCountDtsHandler.XPATH +
+                "[rw-vnfr:vnfd-id-ref = '{}']").format(vnfd_id)
+
+    @asyncio.coroutine
+    def get_vnfd_refcount(self, vnfd_id):
+        """ Get the vnfd_list from this VNFM"""
+        vnfd_list = []
+        if vnfd_id is None or vnfd_id == "":
+            for vnfd in self._vnfds.values():
+                vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
+                vnfd_msg.vnfd_id_ref = vnfd.id
+                vnfd_msg.instance_ref_count = vnfd.ref_count
+                vnfd_list.append((self.vnfd_refcount_xpath(vnfd.id), vnfd_msg))
+        elif vnfd_id in self._vnfds:
+                vnfd_msg.vnfd_id_ref = self._vnfds[vnfd_id].id
+                vnfd_msg.instance_ref_count = self._vnfds[vnfd_id].ref_count
+                vnfd_list.append((self.vnfd_refcount_xpath(vnfd_id), vnfd_msg))
+
+        return vnfd_list
+
+
+class VnfmTasklet(rift.tasklets.Tasklet):
+    """ VNF Manager tasklet class """
+    def __init__(self, *args, **kwargs):
+        super(VnfmTasklet, self).__init__(*args, **kwargs)
+        # Route this tasklet's logging into the rw-mano-log category
+        self.rwlog.set_category("rw-mano-log")
+        self.rwlog.set_subcategory("vnfm")
+
+        self._dts = None     # DTS api handle, created in start()
+        self._vnfm = None    # VnfManager, created in init()
+
+    def start(self):
+        """ Tasklet start callback - connect to DTS """
+        try:
+            super(VnfmTasklet, self).start()
+            self.log.info("Starting VnfmTasklet")
+
+            # Force verbose logging for this tasklet
+            self.log.setLevel(logging.DEBUG)
+
+            self.log.debug("Registering with dts")
+            self._dts = rift.tasklets.DTS(self.tasklet_info,
+                                          RwVnfmYang.get_schema(),
+                                          self.loop,
+                                          self.on_dts_state_change)
+
+            self.log.debug("Created DTS Api GI Object: %s", self._dts)
+        except Exception:
+            # print as well as raise so the failure is visible even if
+            # logging is not functional yet
+            print("Caught Exception in VNFM start:", sys.exc_info()[0])
+            raise
+
+    def on_instance_started(self):
+        """ Task instance started callback """
+        self.log.debug("Got instance started callback")
+
+    def stop(self):
+        """ Tasklet stop callback - tear down the DTS connection """
+        try:
+            # NOTE(review): assumes start() succeeded; self._dts is None if
+            # it did not - confirm stop() cannot be invoked in that case.
+            self._dts.deinit()
+        except Exception:
+            print("Caught Exception in VNFM stop:", sys.exc_info()[0])
+            raise
+
+    @asyncio.coroutine
+    def init(self):
+        """ Task init callback - create and run the VnfManager """
+        try:
+            vm_parent_name = self.tasklet_info.get_parent_vm_parent_instance_name()
+            assert vm_parent_name is not None
+            self._vnfm = VnfManager(self._dts, self.log, self.loop, vm_parent_name)
+            yield from self._vnfm.run()
+        except Exception:
+            print("Caught Exception in VNFM init:", sys.exc_info()[0])
+            raise
+
+    @asyncio.coroutine
+    def run(self):
+        """ Task run callback """
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Take action according to current dts state to transition
+        application into the corresponding application state
+
+        Arguments
+            state - current dts state
+        """
+        # Next dts state to request after handling the current one
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        # Application callbacks to invoke on entering a dts state
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self._dts.handle.set_state(next_state)
diff --git a/rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py b/rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py
new file mode 100755
index 0000000..37ada1a
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwvnfmtasklet
+
+class Tasklet(rift.tasklets.rwvnfmtasklet.VnfmTasklet):
+    """Plugin entry point - the actual implementation is VnfmTasklet,
+    installed under site-packages (RIFT-6485 workaround)."""
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwvns/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/CMakeLists.txt
new file mode 100644
index 0000000..b10d81d
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/CMakeLists.txt
@@ -0,0 +1,51 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwvnstasklet)
+
+# Process the yang models and vala bindings before installing the tasklet
+set(subdirs yang vala)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+# NOTE(review): PKG_LONG_NAME is not set in this file - it is expected to
+# be inherited from a parent CMakeLists.txt; confirm this directory is
+# only ever added via rift_add_subdirs from one that defines it.
+rift_python_install_tree(
+  FILES
+    rift/vlmgr/__init__.py
+    rift/vlmgr/rwvlmgr.py
+    rift/topmgr/__init__.py
+    rift/topmgr/rwtopmgr.py
+    rift/topmgr/rwtopdatastore.py
+    rift/topmgr/core.py
+    rift/topmgr/mock.py
+    rift/topmgr/sdnsim.py
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/rwlaunchpad/plugins/rwvns/Makefile b/rwlaunchpad/plugins/rwvns/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upwards(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+# NOTE(review): if it was NOT found, $(makefile.top) expands to empty -
+# verify make's handling of an empty include list is acceptable here.
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py b/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py
new file mode 100644
index 0000000..6b68c19
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py
@@ -0,0 +1 @@
+from .rwvnstasklet import VnsTasklet
diff --git a/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py b/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py
new file mode 100755
index 0000000..1f88824
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py
@@ -0,0 +1,458 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import logging
+import os
+import sys
+
+import gi
+gi.require_version('RwVnsYang', '1.0')
+gi.require_version('RwDts', '1.0')
+from gi.repository import (
+    RwVnsYang,
+    RwSdnYang,
+    RwDts as rwdts,
+    RwTypes,
+    ProtobufC,
+)
+
+import rift.tasklets
+
+from rift.vlmgr import (
+    VlrDtsHandler,
+    VldDtsHandler,
+    VirtualLinkRecord,
+)
+
+from rift.topmgr import (
+    NwtopStaticDtsHandler,
+    NwtopDiscoveryDtsHandler,
+    NwtopDataStore,
+    SdnAccountMgr,
+)
+
+
+class SdnInterfaceError(Exception):
+    """ SDN interface creation Error """
+    pass
+
+
+class SdnPluginError(Exception):
+    """ SDN plugin creation Error """
+    pass
+
+
+class VlRecordError(Exception):
+    """ Vlr Record creation Error """
+    pass
+
+
+class VlRecordNotFound(Exception):
+    """ Vlr Record not found"""
+    pass
+
+class SdnAccountError(Exception):
+    """ Error while creating/deleting/updating SDN Account"""
+    pass
+
+class SdnAccountNotFound(Exception):
+    """ SDN Account not found """
+    pass
+
+class SDNAccountDtsOperdataHandler(object):
+    """Serves SDN account operational data over DTS: per-account
+    connection status and the rw-sdn:update-sdn-status RPC."""
+    def __init__(self, dts, log, loop, parent):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._parent = parent    # owner providing the _acctmgr account manager
+
+    def _register_show_status(self):
+        # NOTE: not decorated with @asyncio.coroutine, but the "yield from"
+        # at the end makes this a generator; register() drives it with
+        # "yield from".
+        def get_xpath(sdn_name=None):
+            # Operational xpath, optionally keyed by account name
+            return "D,/rw-sdn:sdn-account{}/rw-sdn:connection-status".format(
+                    "[name='%s']" % sdn_name if sdn_name is not None else ''
+                   )
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            # Extract the (possibly absent) account name from the keyspec
+            path_entry = RwSdnYang.SDNAccountConfig.schema().keyspec_to_entry(ks_path)
+            sdn_account_name = path_entry.key00.name
+            self._log.debug("Got show sdn connection status request: %s", ks_path.create_string())
+
+            try:
+                saved_accounts = self._parent._acctmgr.get_saved_sdn_accounts(sdn_account_name)
+                for account in saved_accounts:
+                    sdn_acct = RwSdnYang.SDNAccountConfig()
+                    sdn_acct.from_dict(account.as_dict())
+
+                    self._log.debug("Responding to sdn connection status request: %s", sdn_acct.connection_status)
+                    # One MORE response per matching account; terminated by
+                    # the ACK below
+                    xact_info.respond_xpath(
+                            rwdts.XactRspCode.MORE,
+                            xpath=get_xpath(account.name),
+                            msg=sdn_acct.connection_status,
+                            )
+            except KeyError as e:
+                # Unknown account name: respond "not applicable"
+                self._log.warning(str(e))
+                xact_info.respond_xpath(rwdts.XactRspCode.NA)
+                return
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    def _register_validate_rpc(self):
+        # Generator for the same reason as _register_show_status above.
+        def get_xpath():
+            return "/rw-sdn:update-sdn-status"
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            if not msg.has_field("sdn_account"):
+                raise SdnAccountNotFound("SDN account name not provided")
+
+            sdn_account_name = msg.sdn_account
+            account = self._parent._acctmgr.get_sdn_account(sdn_account_name)
+            if account is None:
+                self._log.warning("SDN account %s does not exist", sdn_account_name)
+                xact_info.respond_xpath(rwdts.XactRspCode.NA)
+                return
+
+            # Kick off asynchronous credential validation; the outcome is
+            # exposed through the account's connection-status operdata
+            self._parent._acctmgr.start_validate_credentials(self._loop, sdn_account_name)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register both operdata DTS registrations """
+        yield from self._register_show_status()
+        yield from self._register_validate_rpc()
+
+class SDNAccountDtsHandler(object):
+    """Subscriber for SDN account configuration (create/update/delete)."""
+    XPATH = "C,/rw-sdn:sdn-account"
+
+    def __init__(self, dts, log, parent):
+        self._dts = dts
+        self._log = log
+        self._parent = parent
+
+        # Locally cached accounts, keyed by account name
+        self._sdn_account = {}
+
+    def _set_sdn_account(self, account):
+        self._log.info("Setting sdn account: {}".format(account))
+        if account.name in self._sdn_account:
+            # NOTE(review): the message says the duplicate config is
+            # ignored, but the account is overwritten below regardless -
+            # confirm which behavior is intended.
+            self._log.error("SDN Account with name %s already exists. Ignoring config", account.name);
+        self._sdn_account[account.name]  = account
+        self._parent._acctmgr.set_sdn_account(account)
+
+    def _del_sdn_account(self, account_name):
+        self._log.info("Deleting sdn account: {}".format(account_name))
+        # Raises KeyError if the name was never configured
+        del self._sdn_account[account_name]
+
+        self._parent._acctmgr.del_sdn_account(account_name)
+
+    def _update_sdn_account(self, account):
+        self._log.info("Updating sdn account: {}".format(account))
+        # No need to update locally saved sdn_account's updated fields, as they
+        # are not used anywhere. Call the parent's update callback.
+        self._parent._acctmgr.update_sdn_account(account)
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register the appconf group that handles SDN account config """
+        def apply_config(dts, acg, xact, action, _):
+            self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", xact, action)
+            if action == rwdts.AppconfAction.INSTALL and xact.id is None:
+                # All real work happens in on_prepare; nothing to apply here
+                self._log.debug("No xact handle.  Skipping apply config")
+                return RwTypes.RwStatus.SUCCESS
+
+            return RwTypes.RwStatus.SUCCESS
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare callback from DTS for SDN Account config """
+
+            self._log.info("SDN Cloud account config received: %s", msg)
+
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            if fref.is_field_deleted():
+                # Delete the sdn account record
+                self._del_sdn_account(msg.name)
+            else:
+                # If the account already exists, then this is an update.
+                if msg.name in self._sdn_account:
+                    self._log.debug("SDN account already exists. Invoking on_prepare update request")
+                    if msg.has_field("account_type"):
+                        # account-type is immutable once configured
+                        errmsg = "Cannot update SDN account's account-type."
+                        self._log.error(errmsg)
+                        xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+                                                   SDNAccountDtsHandler.XPATH,
+                                                   errmsg)
+                        raise SdnAccountError(errmsg)
+
+                    # Update the sdn account record
+                    self._update_sdn_account(msg)
+                else:
+                    self._log.debug("SDN account does not already exist. Invoking on_prepare add request")
+                    if not msg.has_field('account_type'):
+                        errmsg = "New SDN account must contain account-type field."
+                        self._log.error(errmsg)
+                        xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+                                                   SDNAccountDtsHandler.XPATH,
+                                                   errmsg)
+                        raise SdnAccountError(errmsg)
+
+                    # Set the sdn account record
+                    self._set_sdn_account(msg)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+
+        self._log.debug("Registering for Sdn Account config using xpath: %s",
+                        SDNAccountDtsHandler.XPATH,
+                        )
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_config,
+                        )
+
+        with self._dts.appconf_group_create(acg_handler) as acg:
+            acg.register(
+                    xpath=SDNAccountDtsHandler.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+                    on_prepare=on_prepare
+                    )
+
+
+class VnsManager(object):
+    """ The Virtual Network Service Manager """
+    def __init__(self, dts, log, log_hdl, loop):
+        self._dts = dts
+        self._log = log
+        self._log_hdl = log_hdl
+        self._loop = loop
+        self._vlr_handler = VlrDtsHandler(dts, log, loop, self)
+        self._vld_handler = VldDtsHandler(dts, log, loop, self)
+        self._sdn_handler = SDNAccountDtsHandler(dts,log,self)
+        self._sdn_opdata_handler = SDNAccountDtsOperdataHandler(dts,log, loop, self)
+        self._acctmgr = SdnAccountMgr(self._log, self._log_hdl, self._loop)
+        self._nwtopdata_store = NwtopDataStore(log)
+        self._nwtopdiscovery_handler = NwtopDiscoveryDtsHandler(dts, log, loop, self._acctmgr, self._nwtopdata_store)
+        self._nwtopstatic_handler = NwtopStaticDtsHandler(dts, log, loop, self._acctmgr, self._nwtopdata_store)
+        self._vlrs = {}
+
+    @asyncio.coroutine
+    def register_vlr_handler(self):
+        """ Register vlr DTS handler """
+        self._log.debug("Registering  DTS VLR handler")
+        yield from self._vlr_handler.register()
+
+    @asyncio.coroutine
+    def register_vld_handler(self):
+        """ Register vlr DTS handler """
+        self._log.debug("Registering  DTS VLD handler")
+        yield from self._vld_handler.register()
+
+    @asyncio.coroutine
+    def register_sdn_handler(self):
+        """ Register vlr DTS handler """
+        self._log.debug("Registering  SDN Account config handler")
+        yield from self._sdn_handler.register()
+        yield from self._sdn_opdata_handler.register()
+
+    @asyncio.coroutine
+    def register_nwtopstatic_handler(self):
+        """ Register static NW topology DTS handler """
+        self._log.debug("Registering  static DTS NW topology handler")
+        yield from self._nwtopstatic_handler.register()
+
+    @asyncio.coroutine
+    def register_nwtopdiscovery_handler(self):
+        """ Register discovery-based NW topology DTS handler """
+        self._log.debug("Registering  discovery-based DTS NW topology handler")
+        yield from self._nwtopdiscovery_handler.register()
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register all static DTS handlers"""
+        yield from self.register_sdn_handler()
+        yield from self.register_vlr_handler()
+        yield from self.register_vld_handler()
+        yield from self.register_nwtopstatic_handler()
+        # Not used for now
+        yield from self.register_nwtopdiscovery_handler()
+
+    def create_vlr(self, msg):
+        """ Create VLR """
+        if msg.id in self._vlrs:
+            err = "Vlr id %s already exists" % msg.id
+            self._log.error(err)
+            # raise VlRecordError(err)
+            return self._vlrs[msg.id]
+
+        self._log.info("Creating VirtualLinkRecord %s", msg.id)
+        self._vlrs[msg.id] = VirtualLinkRecord(self._dts,
+                                               self._log,
+                                               self._loop,
+                                               self,
+                                               msg,
+                                               msg.res_id
+                                               )
+        return self._vlrs[msg.id]
+
+    def get_vlr(self, vlr_id):
+        """  Get VLR by vlr id (raises KeyError for an unknown id) """
+        return self._vlrs[vlr_id]
+
+    @asyncio.coroutine
+    def delete_vlr(self, vlr_id, xact):
+        """ Delete VLR with the passed id
+
+        Raises VlRecordNotFound when vlr_id is not registered.
+        """
+        if vlr_id not in self._vlrs:
+            err = "Delete Failed - Vlr id %s not found" % vlr_id
+            self._log.error(err)
+            raise VlRecordNotFound(err)
+
+        self._log.info("Deleting virtual link id %s", vlr_id)
+        # Terminate first, then drop from the map — a failed terminate
+        # leaves the record registered rather than leaking it silently.
+        yield from self._vlrs[vlr_id].terminate(xact)
+        del self._vlrs[vlr_id]
+        self._log.info("Deleted virtual link id %s", vlr_id)
+
+    def find_vlr_by_vld_id(self, vld_id):
+        """ Find a VLR matching the VLD Id """
+        for vlr in self._vlrs.values():
+            if vlr.vld_id == vld_id:
+                return vlr
+        return None
+
+    @asyncio.coroutine
+    def run(self):
+        """ Run this VNSM instance: registers all static DTS handlers """
+        self._log.debug("Run VNSManager - registering static DTS handlers")
+        yield from self.register()
+
+    def vld_in_use(self, vld_id):
+        """ Is this VLD in use
+
+        Stub: always returns False — TODO implement real in-use tracking.
+        """
+        return False
+
+    @asyncio.coroutine
+    def publish_vlr(self, xact, path, msg):
+        """ Publish a VLR via the VLR handler's update at the given xpath """
+        self._log.debug("Publish vlr called with path %s, msg %s",
+                        path, msg)
+        yield from self._vlr_handler.update(xact, path, msg)
+
+    @asyncio.coroutine
+    def unpublish_vlr(self, xact, path):
+        """ Unpublish a VLR by deleting it at the given xpath """
+        self._log.debug("Unpublish vlr called with path %s", path)
+        yield from self._vlr_handler.delete(xact, path)
+
+
+class VnsTasklet(rift.tasklets.Tasklet):
+    """ The VNS tasklet class
+
+    Owns the DTS connection and the VnsManager; drives the VNS lifecycle
+    through the DTS state-change callbacks (INIT -> REGN_COMPLETE,
+    CONFIG -> RUN).
+    """
+    def __init__(self, *args, **kwargs):
+        super(VnsTasklet, self).__init__(*args, **kwargs)
+        self.rwlog.set_category("rw-mano-log")
+        self.rwlog.set_subcategory("vns")
+
+        self._dts = None
+        self._vlr_handler = None
+
+        self._vnsm = None
+        # A mapping of instantiated vlr_id's to VirtualLinkRecord objects
+        self._vlrs = {}
+
+    def start(self):
+        """ Tasklet start: connect to DTS with the RwVns schema.
+        Further setup is deferred to on_dts_state_change/init. """
+        super(VnsTasklet, self).start()
+        self.log.info("Starting VnsTasklet")
+
+        self.log.debug("Registering with dts")
+        self._dts = rift.tasklets.DTS(self.tasklet_info,
+                                      RwVnsYang.get_schema(),
+                                      self.loop,
+                                      self.on_dts_state_change)
+
+        self.log.debug("Created DTS Api GI Object: %s", self._dts)
+
+    def on_instance_started(self):
+        """ The task instance started callback"""
+        self.log.debug("Got instance started callback")
+
+    def stop(self):
+      # Tear down the DTS connection; re-raise on failure after printing.
+      # NOTE(review): indentation here is 3/6-space (valid but inconsistent
+      # with the rest of the file), and `sys` must be imported at module
+      # level (not visible in this chunk) — confirm.
+      try:
+         self._dts.deinit()
+      except Exception:
+         print("Caught Exception in VNS stop:", sys.exc_info()[0])
+         raise
+
+    @asyncio.coroutine
+    def init(self):
+        """ task init callback: create and run the VnsManager """
+        self._vnsm = VnsManager(dts=self._dts,
+                                log=self.log,
+                                log_hdl=self.log_hdl,
+                                loop=self.loop)
+        yield from self._vnsm.run()
+
+        # NSM needs to detect VLD deletion that has active VLR
+        # self._vld_handler = VldDescriptorConfigDtsHandler(
+        #         self._dts, self.log, self.loop, self._vlrs,
+        #         )
+        # yield from self._vld_handler.register()
+
+    @asyncio.coroutine
+    def run(self):
+        """ tasklet run callback (no-op; work happens in DTS handlers) """
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Take action according to current dts state to transition
+        application into the corresponding application state
+
+        Arguments
+            state - current dts state
+        """
+        # dts-state -> next dts-state to request after handling
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        # dts-state -> application coroutine to run on entry
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self._dts.handle.set_state(next_state)
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py
new file mode 100644
index 0000000..f570abc
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py
@@ -0,0 +1,37 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Ravi Chamarty
+# Creation Date: 10/28/2015
+# 
+
+from .rwtopmgr import (
+    NwtopDiscoveryDtsHandler,
+    NwtopStaticDtsHandler,
+    SdnAccountMgr,
+)
+
+from .rwtopdatastore import (
+    NwtopDataStore,
+)
+
+try:
+    from .sdnsim import SdnSim
+    from .core import Topology
+    from .mock import Mock
+
+except ImportError as e:
+    print("Error: Unable to load sdn implementation: %s" % str(e))
+
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/core.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/core.py
new file mode 100644
index 0000000..dd3ad2f
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/rift/topmgr/core.py
@@ -0,0 +1,49 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import functools
+
+#from . import exceptions
+
+
+def unsupported(f):
+    """ Decorator marking a Topology method as unsupported by a driver.
+
+    Calling the wrapped method is intended to raise
+    exceptions.RWErrorNotSupported.
+    NOTE(review): the `from . import exceptions` import above is commented
+    out, so this raise would currently fail with NameError — confirm.
+    """
+    @functools.wraps(f)
+    def impl(*args, **kwargs):
+        msg = '{} not supported'.format(f.__name__)
+        raise exceptions.RWErrorNotSupported(msg)
+
+    return impl
+
+
+class Topology(object):
+    """
+    Topology defines a base class for sdn driver implementations. Note that
+    not all drivers will support the complete set of functionality presented
+    here.
+    """
+
+    @unsupported
+    def get_network_list(self, account):
+        """
+        Returns the discovered network associated with the specified account.
+
+        @param account - an SDN account
+
+        @return a discovered network
+        """
+        pass
+
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py
new file mode 100644
index 0000000..cc0e489
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py
@@ -0,0 +1,50 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import mock
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import RwcalYang
+
+from . import core
+
+import logging
+
+logger = logging.getLogger('rwsdn.mock')
+
+class Mock(core.Topology):
+    """This class implements the abstract methods in the Topology class.
+    Mock is used for unit testing."""
+
+    def __init__(self):
+        super(Mock, self).__init__()
+
+        # NOTE(review): `m` is assigned but never used — confirm whether
+        # the MagicMock was meant to back any of the methods below.
+        m = mock.MagicMock()
+
+        # NOTE(review): create_default_topology is not defined in this
+        # module (and not imported), so instantiating Mock raises
+        # NameError — confirm where this helper was meant to come from.
+        create_default_topology()
+
+    def get_network_list(self, account):
+        """
+        Returns the discovered network
+
+        @param account - a SDN account
+
+        """
+        # Stub: no topology is returned yet.
+        logger.debug("Not yet implemented")
+        return None
+
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py
new file mode 100644
index 0000000..ad021a8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py
@@ -0,0 +1,186 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import gi
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    IetfNetworkYang,
+    IetfNetworkTopologyYang,
+    IetfL2TopologyYang,
+    RwTopologyYang,
+    RwTypes
+)
+import logging
+from gi.repository.RwTypes import RwStatus
+
+
+class NwtopDataStore(object):
+    """ Common datastore for discovered and static topologies """
+    def __init__(self, log):
+        self._networks = {}
+        self._log = log
+
+    """ Deep copy utility for topology class """
+    def rwtop_copy_object(self, obj):
+        dup = obj.__class__()
+        dup.copy_from(obj)
+        return dup
+
+    """ Utility for updating L2 topology attributes """
+    def _update_l2_attr(self, current_elem, new_elem, new_l2_attr, attr_field_name):
+        if not getattr(current_elem, attr_field_name):
+           self._log.debug ("Creating L2 attributes..%s", l2_attr_field)
+           setattr(current_elem, attr_field_name, new_l2_attr)
+           return
+
+        for l2_attr_field in new_l2_attr.fields:
+             l2_elem_attr_value = getattr(new_l2_attr, l2_attr_field)
+             if l2_elem_attr_value:
+                 self._log.debug ("Updating L2 attributes..%s", l2_attr_field)
+                 setattr(getattr(current_elem, attr_field_name), l2_attr_field, getattr(new_l2_attr, l2_attr_field))
+
+    """ Utility for updating termination point attributes """
+    def _update_termination_point(self, current_node, new_node, new_tp):
+        current_tp = next((x for x in current_node.termination_point if x.tp_id == new_tp.tp_id), None)
+        if current_tp is None:
+            self._log.debug("Creating termination point..%s", new_tp)
+            # Add tp to current node
+            new_tp_dup = self.rwtop_copy_object(new_tp)
+            current_node.termination_point.append(new_tp_dup)
+            return
+        # Update current tp
+        for tp_field in new_tp.fields:
+            tp_field_value = getattr(new_tp, tp_field)
+            if tp_field_value:
+                self._log.debug("Updating termination point..%s", tp_field)
+                if (tp_field == 'tp_id'):
+                    # Don't change key
+                    pass
+                elif (tp_field == 'l2_termination_point_attributes'):
+                    self._update_l2_attr(current_tp, new_tp, tp_field_value, tp_field)
+                elif (tp_field == 'supporting_termination_point'):
+                    self._log.debug(tp_field)
+                else:
+                    self._log.info("Updating termination point..Not implemented %s", tp_field)
+                    #raise NotImplementedError
+
+    """ Utility for updating link attributes """
+    def _update_link(self, current_nw, new_nw, new_link):
+        current_link = next((x for x in current_nw.link if x.link_id == new_link.link_id), None)
+        if current_link is None:
+            # Add link to current nw
+            self._log.info("Creating link..%s", new_link )
+            new_link_dup = self.rwtop_copy_object(new_link)
+            current_nw.link.append(new_link_dup)
+            return
+        # Update current link
+        for link_field in new_link.fields:
+            link_field_value = getattr(new_link, link_field)
+            if link_field_value:
+                self._log.info("Updating link..%s", link_field)
+                if (link_field == 'link_id'):
+                    # Don't change key
+                    pass
+                elif (link_field == 'source'):
+                    if getattr(link_field_value, 'source_node') is not None:
+                       current_link.source.source_node = getattr(link_field_value, 'source_node')
+                    if getattr(link_field_value, 'source_tp') is not None:
+                       current_link.source.source_tp = getattr(link_field_value, 'source_tp')
+                elif (link_field == 'destination'):
+                    if getattr(link_field_value, 'dest_node') is not None:
+                       current_link.destination.dest_node = link_field_value.dest_node
+                    if getattr(link_field_value, 'dest_tp') is not None:
+                       current_link.destination.dest_tp = link_field_value.dest_tp
+                elif (link_field == 'l2_link_attributes'):
+                    self._update_l2_attr(current_link, new_link, link_field_value, link_field)
+                elif (link_field == 'supporting_link'):
+                    self._log.debug(link_field)
+                else:
+                    self._log.info("Update link..Not implemented %s", link_field)
+                    #raise NotImplementedError
+
+
+    """ Utility for updating node attributes """
+    def _update_node(self, current_nw, new_nw, new_node):
+        current_node = next((x for x in current_nw.node if x.node_id == new_node.node_id), None)
+        if current_node is None:
+            # Add node to current nw
+            self._log.debug("Creating node..%s", new_node)
+            new_node_dup = self.rwtop_copy_object(new_node)
+            current_nw.node.append(new_node_dup)
+            return
+        # Update current node
+        for node_field in new_node.fields:
+            node_field_value = getattr(new_node, node_field)
+            if node_field_value:
+                self._log.debug("Updating node..%s", node_field)
+                if (node_field == 'node_id'):
+                    # Don't change key
+                    pass
+                elif (node_field == 'l2_node_attributes'):
+                    self._update_l2_attr(current_node, new_node, node_field_value, node_field)
+                elif (node_field == 'termination_point'):
+                    for tp in new_node.termination_point:
+                        self._update_termination_point(current_node, new_node, tp)
+                elif (node_field == 'supporting-node'):
+                    self._log.debug(node_field)
+                else:
+                    self._log.info("Update node..Not implemented %s", node_field)
+                    #raise NotImplementedError
+
+
+    """ API for retrieving internal network """
+    def get_network(self, network_id):
+        if (network_id not in self._networks):
+            return None
+        return self._networks[network_id]
+
+    """ API for creating internal network """
+    def create_network(self, key, nw):
+        self._networks[key] = self.rwtop_copy_object(nw)
+
+    """ API for updating internal network """
+    def update_network(self, key, new_nw):
+        if key not in self._networks:
+            self._log.debug("Creating network..New_nw %s", new_nw)
+            self._networks[key] = self.rwtop_copy_object(new_nw)
+            return
+        # Iterating thru changed fields
+        for nw_field in new_nw.fields:
+            nw_field_value = getattr(new_nw, nw_field)
+            self._log.debug("Update nw..nw_field %s", nw_field)
+            if nw_field_value:
+                if (nw_field == 'node'):
+                    for node in new_nw.node:
+                        self._update_node(self._networks[key], new_nw, node)
+                elif (nw_field == 'network_id'):
+                    # Don't change key
+                    pass
+                elif (nw_field == 'link'):
+                    for link in new_nw.link:
+                        self._update_link(self._networks[key], new_nw, link)
+                elif (nw_field == 'network_types'):
+                    self._networks[key].network_types.l2_network = self._networks[key].network_types.l2_network.new()
+                elif (nw_field == 'l2_network_attributes'):
+                    self._update_l2_attr(self._networks[key], new_nw, nw_field_value, nw_field)
+                else:
+                    self._log.info("Update nw..Not implemented %s", nw_field)
+                    #raise NotImplementedError
+
+        
+
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py
new file mode 100755
index 0000000..b095fbc
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py
@@ -0,0 +1,329 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    IetfNetworkYang,
+    IetfNetworkTopologyYang,
+    IetfL2TopologyYang,
+    RwTopologyYang,
+    RwsdnYang,
+    RwTypes
+)
+
+from gi.repository.RwTypes import RwStatus
+import rw_peas
+import rift.tasklets
+
+class SdnGetPluginError(Exception):
+    """ Error while fetching SDN plugin (account missing or load failed) """
+    pass
+  
+  
+class SdnGetInterfaceError(Exception):
+    """ Error while fetching SDN interface from a loaded plugin """
+    pass
+
+
+class SdnAccountMgr(object):
+    """ Implements the interface to backend plugins to fetch topology
+
+    Keeps configured SDN accounts (self._account) and their lazily-loaded
+    libpeas plugin interfaces (self._sdn), and validates account
+    credentials asynchronously.
+    """
+    def __init__(self, log, log_hdl, loop):
+        # account name -> RwsdnYang.SDNAccount
+        self._account = {}
+        self._log = log
+        self._log_hdl = log_hdl
+        self._loop = loop
+        # account name -> loaded plugin "Topology" interface
+        self._sdn = {}
+
+        self._regh = None
+
+        self._status = RwsdnYang.SDNAccount_ConnectionStatus(
+                status='unknown',
+                details="Connection status lookup not started"
+                )
+
+        self._validate_task = None
+
+    def set_sdn_account(self,account):
+        """ Store a new SDN account and kick off credential validation.
+        Re-setting an existing name only logs an error. """
+        if (account.name in self._account):
+            self._log.error("SDN Account is already set")
+        else:
+            sdn_account           = RwsdnYang.SDNAccount()
+            sdn_account.from_dict(account.as_dict())
+            sdn_account.name = account.name
+            self._account[account.name] = sdn_account
+            self._log.debug("Account set is %s , %s",type(self._account), self._account)
+            self.start_validate_credentials(self._loop, account.name)
+
+    def del_sdn_account(self, name):
+        """ Delete the named SDN account (KeyError if absent). """
+        self._log.debug("Account deleted is %s , %s", type(self._account), name)
+        del self._account[name]
+
+    def update_sdn_account(self,account):
+        """ Merge changed fields into an existing account and re-validate.
+        Unknown names are silently ignored. """
+        self._log.debug("Account updated is %s , %s", type(self._account), account)
+        if account.name in self._account:
+            sdn_account = self._account[account.name]
+
+            sdn_account.from_dict(
+                account.as_dict(),
+                ignore_missing_keys=True,
+                )
+            self._account[account.name] = sdn_account
+            self.start_validate_credentials(self._loop, account.name)
+
+    def get_sdn_account(self, name):
+        """
+        Creates an object for class RwsdnYang.SdnAccount()
+
+        Returns None (implicitly) and logs an error when the name is not
+        configured.
+        """
+        if (name in self._account):
+            return self._account[name]
+        else:
+            self._log.error("ERROR : SDN account is not configured") 
+
+    def get_saved_sdn_accounts(self, name):
+        ''' Get SDN Account corresponding to passed name, or all saved accounts if name is None'''
+        saved_sdn_accounts = []
+
+        if name is None or name == "":
+            sdn_accounts = list(self._account.values())
+            saved_sdn_accounts.extend(sdn_accounts)
+        elif name in self._account:
+            account = self._account[name]
+            saved_sdn_accounts.append(account)
+        else:
+            errstr = "SDN account {} does not exist".format(name)
+            raise KeyError(errstr)
+
+        return saved_sdn_accounts
+
+    def get_sdn_plugin(self,name):
+        """
+        Loads rw.sdn plugin via libpeas
+
+        The loaded interface is cached per account name.
+        """
+        if (name in self._sdn):
+            return self._sdn[name]
+        account = self.get_sdn_account(name)
+        plugin_name = getattr(account, account.account_type).plugin_name
+        self._log.info("SDN plugin being created")
+        plugin = rw_peas.PeasPlugin(plugin_name, 'RwSdn-1.0')
+        engine, info, extension = plugin()
+
+        self._sdn[name] = plugin.get_interface("Topology")
+        # NOTE(review): a failed init is only logged — the (uninitialized)
+        # plugin is still cached and returned. Confirm this is intended.
+        try:
+            rc = self._sdn[name].init(self._log_hdl)
+            assert rc == RwStatus.SUCCESS
+        except:
+            self._log.error("ERROR:SDN plugin instantiation failed ")
+        else:
+            self._log.info("SDN plugin successfully instantiated")
+        return self._sdn[name]
+
+    @asyncio.coroutine
+    def validate_sdn_account_credentials(self, loop, name):
+        """ Validate the named account's credentials via the plugin,
+        off-loop in an executor, and record the connection status. """
+        self._log.debug("Validating SDN Account credentials %s", name)
+        self._status = RwsdnYang.SDNAccount_ConnectionStatus(
+                status="validating",
+                details="SDN account connection validation in progress"
+                )
+
+        _sdnacct = self.get_sdn_account(name)
+        if (_sdnacct is None):
+            raise SdnGetPluginError
+        _sdnplugin = self.get_sdn_plugin(name)
+        if (_sdnplugin is None):
+            raise SdnGetInterfaceError
+
+        # Plugin call is blocking; run it in the default executor
+        rwstatus, status = yield from loop.run_in_executor(
+                None,
+                _sdnplugin.validate_sdn_creds,
+                _sdnacct,
+                )
+
+        if rwstatus == RwTypes.RwStatus.SUCCESS:
+            self._status = RwsdnYang.SDNAccount_ConnectionStatus.from_dict(status.as_dict())
+        else:
+            self._status = RwsdnYang.SDNAccount_ConnectionStatus(
+                    status="failure",
+                    details="Error when calling CAL validate sdn creds"
+                    )
+
+        self._log.info("Got sdn account validation response: %s", self._status)
+        _sdnacct.connection_status = self._status
+
+    def start_validate_credentials(self, loop, name):
+        """ (Re)start the async credential validation task for an account,
+        cancelling any validation already in flight. """
+        if self._validate_task is not None:
+            self._validate_task.cancel()
+            self._validate_task = None
+
+        self._validate_task = asyncio.ensure_future(
+                self.validate_sdn_account_credentials(loop, name),
+                loop=loop
+                )
+
+
+class NwtopDiscoveryDtsHandler(object):
+    """ Handles DTS interactions for the Discovered Topology registration
+
+    Publishes networks fetched on demand from every configured SDN
+    account's plugin under DISC_XPATH.
+    """
+    DISC_XPATH = "D,/nd:network"
+
+    def __init__(self, dts, log, loop, acctmgr, nwdatastore):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._acctmgr = acctmgr
+        self._nwdatastore = nwdatastore
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ The registration handle associated with this Handler"""
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for the Discovered Topology path """
+
+        @asyncio.coroutine
+        def on_ready(regh, status):
+            """  On_ready for Discovered Topology registration """
+            self._log.debug("PUB reg ready for Discovered Topology handler regn_hdl(%s) status %s",
+                                         regh, status)
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare for Discovered Topology registration
+
+            NOTE(review): non-READ actions fall through without any
+            respond_xpath call — confirm that is acceptable to DTS.
+            """
+            self._log.debug(
+                "Got topology on_prepare callback (xact_info: %s, action: %s): %s",
+                xact_info, action, msg
+                )
+
+            if action == rwdts.QueryAction.READ:
+                
+                # NOTE(review): iterates the account manager's private
+                # _account dict directly — consider a public accessor.
+                for name in self._acctmgr._account:
+                    _sdnacct = self._acctmgr.get_sdn_account(name)
+                    if (_sdnacct is None):
+                        raise SdnGetPluginError
+
+                    _sdnplugin = self._acctmgr.get_sdn_plugin(name)
+                    if (_sdnplugin is None):
+                        raise SdnGetInterfaceError
+
+                    rc, nwtop = _sdnplugin.get_network_list(_sdnacct)
+                    #assert rc == RwStatus.SUCCESS
+                    if rc != RwStatus.SUCCESS:
+                        self._log.error("Fetching get network list for SDN Account %s failed", name)
+                        xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                        return
+                    
+                    self._log.debug("Topology: Retrieved network attributes ")
+                    for nw in nwtop.network:
+                        # Add SDN account name
+                        nw.rw_network_attributes.sdn_account_name = name
+                        nw.server_provided = False
+                        # Namespace the network id with the account name
+                        nw.network_id = name + ':' + nw.network_id
+                        self._log.debug("...Network id %s", nw.network_id)
+                        nw_xpath = ("D,/nd:network[network-id=\'{}\']").format(nw.network_id)
+                        xact_info.respond_xpath(rwdts.XactRspCode.MORE,
+                                        nw_xpath, nw)
+
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+                #err = "%s action on discovered Topology not supported" % action
+                #raise NotImplementedError(err)
+
+        self._log.debug("Registering for discovered topology using xpath %s", NwtopDiscoveryDtsHandler.DISC_XPATH)
+
+        handler = rift.tasklets.DTS.RegistrationHandler(
+            on_ready=on_ready,
+            on_prepare=on_prepare,
+            )
+
+        yield from self._dts.register(
+            NwtopDiscoveryDtsHandler.DISC_XPATH,
+            flags=rwdts.Flag.PUBLISHER,
+            handler=handler
+            )
+
+
+class NwtopStaticDtsHandler(object):
+    """ Handles DTS interactions for the Static Topology registration
+
+    Subscribes to configured networks under STATIC_XPATH and applies them
+    into the shared NwtopDataStore.
+    """
+    STATIC_XPATH = "C,/nd:network"
+
+    def __init__(self, dts, log, loop, acctmgr, nwdatastore):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._acctmgr = acctmgr
+
+        self._regh = None
+        # xact.id -> stashed network config awaiting apply
+        # NOTE(review): entries are never removed after apply — confirm
+        # whether this should be pruned to avoid unbounded growth.
+        self.pending = {}
+        self._nwdatastore = nwdatastore
+
+    @property
+    def regh(self):
+        """ The registration handle associated with this Handler"""
+        return self._regh
+ 
+    
+    @asyncio.coroutine
+    def register(self):
+        """ Register for the Static Topology path """
+
+        @asyncio.coroutine
+        def prepare_nw_cfg(dts, acg, xact, xact_info, ksp, msg, scratch):
+            """Prepare for application configuration. Stash the pending
+            configuration object for subsequent transaction phases"""
+            self._log.debug("Prepare Network config received network id %s, msg %s",
+                           msg.network_id, msg)
+            self.pending[xact.id] = msg
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        def apply_nw_config(dts, acg, xact, action, scratch):
+            """Apply the pending configuration object
+
+            Creates the network on INSTALL, merges it on RECONCILE.
+            NOTE(review): a RECONCILE (or INSTALL with a non-None xact.id)
+            whose id was never stashed raises KeyError here — confirm.
+            """
+            if action == rwdts.AppconfAction.INSTALL and xact.id is None:
+                self._log.debug("No xact handle.  Skipping apply config")
+                return
+
+            if xact.id not in self.pending:
+                raise KeyError("No stashed configuration found with transaction id [{}]".format(xact.id))
+
+            try:
+                if action == rwdts.AppconfAction.INSTALL:
+                    self._nwdatastore.create_network(self.pending[xact.id].network_id, self.pending[xact.id])
+                elif action == rwdts.AppconfAction.RECONCILE:
+                    self._nwdatastore.update_network(self.pending[xact.id].network_id, self.pending[xact.id])
+            except:
+                raise 
+
+            self._log.debug("Create network config done")
+            return RwTypes.RwStatus.SUCCESS
+
+        self._log.debug("Registering for static topology using xpath %s", NwtopStaticDtsHandler.STATIC_XPATH)
+        handler=rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_nw_config)
+
+        with self._dts.appconf_group_create(handler=handler) as acg:
+            acg.register(xpath = NwtopStaticDtsHandler.STATIC_XPATH, 
+                                   flags = rwdts.Flag.SUBSCRIBER, 
+                                   on_prepare=prepare_nw_cfg)
+
+
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py
new file mode 100644
index 0000000..4a6b93b
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py
@@ -0,0 +1,76 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from . import core
+import logging
+
+import xml.etree.ElementTree as etree
+from gi.repository import RwTopologyYang as RwTl
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import RwYang
+
+
+logger = logging.getLogger(__name__)
+
+
+class SdnSim(core.Topology):
+    """ Simulated SDN driver: builds a topology from a JSON or XML file
+    named by the account's sdnsim.topology_source field. """
+    def __init__(self):
+        super(SdnSim, self).__init__()
+        # libncx model with the RwTopology schema loaded, used for
+        # from_json/from_xml_v2 deserialization below
+        self._model = RwYang.Model.create_libncx()
+        self._model.load_schema_ypbc(RwTl.get_schema())
+
+    def get_network_list(self, account):
+        """
+        Returns the discovered network
+
+        @param account - a SDN account
+
+        Returns an empty YangData_IetfNetwork when no topology_source is
+        configured.
+        NOTE(review): format is detected by the substrings 'json'/'xml'
+        appearing anywhere in the path (fragile — e.g. a directory named
+        'json'); confirm an extension check was intended. The print() call
+        below also bypasses the module logger.
+        """
+
+        nwtop = RwTl.YangData_IetfNetwork()
+        #topology_source = "/net/boson/home1/rchamart/work/topology/l2_top.xml"
+        if not account.sdnsim.has_field('topology_source') or account.sdnsim.topology_source is None:
+            return nwtop
+        topology_source = account.sdnsim.topology_source
+        logger.info("Reading topology file: %s", topology_source)
+        if 'json' in topology_source: 
+            with open(topology_source,'r') as f:
+                print("Reading static topology file")
+                op_json = f.read()
+                nwtop.from_json(self._model,op_json)
+                for nw in nwtop.network:
+                   nw.server_provided = False
+                   logger.debug("...Network id %s", nw.network_id)
+                   #nw_xpath = ("D,/nd:network[network-id=\'{}\']").format(nw.network_id)
+                   #xact_info.respond_xpath(rwdts.XactRspCode.MORE,
+                   #                 nw_xpath, nw)
+        elif 'xml' in topology_source:
+            tree = etree.parse(topology_source)
+            root = tree.getroot()
+            xmlstr = etree.tostring(root, encoding="unicode")
+
+            # The top level topology object does not have XML conversion
+            # Hence going one level down
+            #l2nw1 = nwtop.network.add()
+            #l2nw1.from_xml_v2(self._model, xmlstr)
+            nwtop.from_xml_v2(self._model,xmlstr)
+
+            logger.debug("Returning topology data imported from XML file")
+
+        return nwtop
diff --git a/rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py b/rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py
new file mode 100644
index 0000000..2bdb77a
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py
@@ -0,0 +1,25 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Ravi Chamarty
+# Creation Date: 9/2/2015
+# 
+
+from .rwvlmgr import (
+    VirtualLinkRecordState,
+    VirtualLinkRecord,
+    VlrDtsHandler,
+    VldDtsHandler,
+)
diff --git a/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py b/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py
new file mode 100755
index 0000000..bdea4ef
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py
@@ -0,0 +1,483 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import enum
+import uuid
+import time
+
+import gi
+gi.require_version('RwVlrYang', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+from gi.repository import (
+    RwVlrYang,
+    VldYang,
+    RwDts as rwdts,
+    RwResourceMgrYang,
+)
+import rift.tasklets
+
+
+class NetworkResourceError(Exception):
+    """ Network Resource Error """
+    # Raised when resource-mgr returns no response, a failed resource, or
+    # a response without a valid virtual_link_id (see request_network).
+    pass
+
+
+class VlrRecordExistsError(Exception):
+    """ VLR record already exists"""
+    # Raised when a VLD delete/update is attempted while VLR records
+    # instantiated from that VLD still exist (see VldDtsHandler).
+    pass
+
+
+class VlRecordError(Exception):
+    """ VLR record error """
+    # Raised for invalid virtual-link operations, e.g. an unknown action
+    # passed to VirtualLinkRecord.request_network.
+    pass
+
+
+class VirtualLinkRecordState(enum.Enum):
+    """ Virtual Link record state """
+    # Normal lifecycle: INIT -> INSTANTIATING -> RESOURCE_ALLOC_PENDING
+    # -> READY -> TERMINATING -> TERMINATED; FAILED is entered when
+    # instantiation raises (see VirtualLinkRecord.instantiate).
+    INIT = 1
+    INSTANTIATING = 2
+    RESOURCE_ALLOC_PENDING = 3
+    READY = 4
+    TERMINATING = 5
+    TERMINATED = 6
+    FAILED = 10
+
+
class VirtualLinkRecord(object):
    """
    Virtual Link Record object

    Tracks one virtual link through its lifecycle: network resource
    allocation via resource-mgr, publication of the operational VLR
    record through the VNS manager, and teardown.
    """
    def __init__(self, dts, log, loop, vnsm, vlr_msg, req_id=None):
        """
        @param dts     - DTS api handle used for resource-mgr queries
        @param log     - logger
        @param loop    - asyncio event loop
        @param vnsm    - VNS manager used to publish/unpublish VLRs
        @param vlr_msg - VLR config message backing this record
        @param req_id  - existing resource request id (restart case);
                         a fresh UUID is generated when None
        """
        self._dts = dts
        self._log = log
        self._loop = loop
        self._vnsm = vnsm
        self._vlr_msg = vlr_msg

        self._network_id = None
        self._network_pool = None
        self._assigned_subnet = None
        self._create_time = int(time.time())
        # Fix: identity comparison with None (was "req_id == None").
        if req_id is None:
            self._request_id = str(uuid.uuid4())
        else:
            self._request_id = req_id

        self._state = VirtualLinkRecordState.INIT
        self._state_failed_reason = None

    @property
    def vld_xpath(self):
        """ VLD xpath associated with this VLR record """
        return "C,/vld:vld-catalog/vld:vld[id='{}']".format(self.vld_id)

    @property
    def vld_id(self):
        """ VLD id associated with this VLR record """
        return self._vlr_msg.vld_ref

    @property
    def vlr_id(self):
        """ VLR id associated with this VLR record """
        return self._vlr_msg.id

    @property
    def xpath(self):
        """ path for this VLR """
        return ("D,/vlr:vlr-catalog"
                "/vlr:vlr[vlr:id='{}']".format(self.vlr_id))

    @property
    def name(self):
        """ Name of this VLR """
        return self._vlr_msg.name

    @property
    def cloud_account_name(self):
        """ Cloud Account to instantiate the virtual link on """
        return self._vlr_msg.cloud_account

    @property
    def resmgr_path(self):
        """ path for resource-mgr """
        return ("D,/rw-resource-mgr:resource-mgmt" +
                "/vlink-event/vlink-event-data[event-id='{}']".format(self._request_id))

    @property
    def operational_status(self):
        """ Operational status of this VLR, mapped from the internal state """
        op_stats_dict = {"INIT": "init",
                         "INSTANTIATING": "vl_alloc_pending",
                         "RESOURCE_ALLOC_PENDING": "vl_alloc_pending",
                         "READY": "running",
                         "FAILED": "failed",
                         "TERMINATING": "vl_terminate_pending",
                         "TERMINATED": "terminated"}

        return op_stats_dict[self._state.name]

    @property
    def msg(self):
        """ VLR message for this VLR, including allocated resource details """
        msg = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr()
        msg.copy_from(self._vlr_msg)

        if self._network_id is not None:
            msg.network_id = self._network_id

        if self._network_pool is not None:
            msg.network_pool = self._network_pool

        if self._assigned_subnet is not None:
            msg.assigned_subnet = self._assigned_subnet

        msg.operational_status = self.operational_status
        msg.operational_status_details = self._state_failed_reason
        msg.res_id = self._request_id

        return msg

    @property
    def resmgr_msg(self):
        """ Resource-mgr vlink-event request message for this VLR """
        msg = RwResourceMgrYang.VirtualLinkEventData()
        msg.event_id = self._request_id
        msg.cloud_account = self.cloud_account_name
        msg.request_info.name = self.name
        msg.request_info.vim_network_name = self._vlr_msg.vim_network_name
        msg.request_info.provider_network.from_dict(
                self._vlr_msg.provider_network.as_dict()
                )
        if self._vlr_msg.has_field('ip_profile_params'):
            msg.request_info.ip_profile_params.from_dict(self._vlr_msg.ip_profile_params.as_dict())

        return msg

    @asyncio.coroutine
    def create_network(self, xact):
        """ Create network for this VL """
        self._log.debug("Creating network req-id: %s", self._request_id)
        return (yield from self.request_network(xact, "create"))

    @asyncio.coroutine
    def delete_network(self, xact):
        """ Delete network for this VL """
        self._log.debug("Deleting network - req-id: %s", self._request_id)
        return (yield from self.request_network(xact, "delete"))

    @asyncio.coroutine
    def read_network(self, xact):
        """ Read network for this VL """
        self._log.debug("Reading network - req-id: %s", self._request_id)
        return (yield from self.request_network(xact, "read"))

    @asyncio.coroutine
    def request_network(self, xact, action):
        """Request creation/deletion/read of the network for this VL

        @param action - one of "create", "delete", "read"
        @raises VlRecordError on an unknown action
        @raises NetworkResourceError on a missing/failed/invalid response
        """
        block = xact.block_create()

        if action == "create":
            self._log.debug("Creating network path:%s, msg:%s",
                            self.resmgr_path, self.resmgr_msg)
            block.add_query_create(self.resmgr_path, self.resmgr_msg)
        elif action == "delete":
            self._log.debug("Deleting network path:%s", self.resmgr_path)
            if self.resmgr_msg.request_info.name != "multisite":
                block.add_query_delete(self.resmgr_path)
        elif action == "read":
            self._log.debug("Reading network path:%s", self.resmgr_path)
            block.add_query_read(self.resmgr_path)
        else:
            raise VlRecordError("Invalid action %s received" % action)

        res_iter = yield from block.execute(now=True)

        resp = None

        if action == "create" or action == "read":
            for i in res_iter:
                r = yield from i
                resp = r.result

            # Fix: the format args were passed to the exception constructor
            # unformatted; interpolate them into the message instead.
            if resp is None:
                raise NetworkResourceError(
                    "Did not get a network resource response (resp: %s)" % (resp,))

            if resp.has_field('resource_info') and resp.resource_info.resource_state == "failed":
                raise NetworkResourceError(resp.resource_info.resource_errors)

            if not (resp.has_field('resource_info') and
                    resp.resource_info.has_field('virtual_link_id')):
                raise NetworkResourceError(
                    "Did not get a valid network resource response (resp: %s)" % (resp,))

            self._log.debug("Got network request response: %s", resp)

        return resp

    @asyncio.coroutine
    def instantiate(self, xact, restart=0):
        """ Instantiate this VL

        @param restart - non-zero when re-populating after a tasklet
                         restart; tries to read back the previously
                         allocated network before creating a new one
        """
        self._state = VirtualLinkRecordState.INSTANTIATING

        self._log.debug("Instantiating VLR path = [%s]", self.xpath)

        try:
            self._state = VirtualLinkRecordState.RESOURCE_ALLOC_PENDING

            if restart == 0:
                network_resp = yield from self.create_network(xact)
            else:
                network_resp = yield from self.read_network(xact)
                # Fix: identity comparison with None (was "== None").
                if network_resp is None:
                    network_resp = yield from self.create_network(xact)

            # Note network_resp.virtual_link_id is CAL assigned network_id.

            self._network_id = network_resp.resource_info.virtual_link_id
            self._network_pool = network_resp.resource_info.pool_name
            self._assigned_subnet = network_resp.resource_info.subnet

            self._state = VirtualLinkRecordState.READY

            yield from self.publish(xact)

        except Exception as e:
            self._log.error("Instantiation of VLR record failed: %s", str(e))
            self._state = VirtualLinkRecordState.FAILED
            self._state_failed_reason = str(e)
            yield from self.publish(xact)

    @asyncio.coroutine
    def publish(self, xact):
        """ Publish this VLR's operational record at self.xpath """
        vlr = self.msg
        vlr.create_time = self._create_time
        self._log.debug("Publishing VLR path = [%s], record = [%s]",
                        self.xpath, vlr)
        # Fix: publish the message that carries create_time; the original
        # built a fresh self.msg here, dropping the create_time just set.
        yield from self._vnsm.publish_vlr(xact, self.xpath, vlr)
        self._log.debug("Published VLR path = [%s], record = [%s]",
                        self.xpath, vlr)

    @asyncio.coroutine
    def terminate(self, xact):
        """ Terminate this VL: release the network and unpublish the VLR """
        if self._state not in [VirtualLinkRecordState.READY, VirtualLinkRecordState.FAILED]:
            self._log.error("Ignoring terminate for VL %s as it is in %s state",
                            self.vlr_id, self._state)
            return

        if self._state == VirtualLinkRecordState.READY:
            self._log.debug("Terminating VL with id %s", self.vlr_id)
            self._state = VirtualLinkRecordState.TERMINATING
            try:
                yield from self.delete_network(xact)
            except Exception:
                # Best-effort delete: still unpublish the record below.
                self._log.exception("Caught exception while deleting VL %s", self.vlr_id)
            self._log.debug("Terminated VL with id %s", self.vlr_id)

        yield from self.unpublish(xact)
        self._state = VirtualLinkRecordState.TERMINATED

    @asyncio.coroutine
    def unpublish(self, xact):
        """ Unpublish this VLR """
        self._log.debug("UnPublishing VLR id %s", self.vlr_id)
        yield from self._vnsm.unpublish_vlr(xact, self.xpath)
        self._log.debug("UnPublished VLR id %s", self.vlr_id)
+
+
class VlrDtsHandler(object):
    """ Handles DTS interactions for the VLR registration (publisher side) """
    XPATH = "D,/vlr:vlr-catalog/vlr:vlr"

    def __init__(self, dts, log, loop, vnsm):
        """
        @param dts  - DTS api handle
        @param log  - logger
        @param loop - asyncio event loop
        @param vnsm - VNS manager used to create/delete VLR records
        """
        self._dts = dts
        self._log = log
        self._loop = loop
        self._vnsm = vnsm

        self._regh = None

    @property
    def regh(self):
        """ The registration handle associated with this Handler """
        return self._regh

    @asyncio.coroutine
    def register(self):
        """ Register as publisher for the VLR path """
        def on_commit(xact_info):
            """ The transaction has been committed """
            self._log.debug("Got vlr commit (xact_info: %s)", xact_info)

            return rwdts.MemberRspCode.ACTION_OK

        @asyncio.coroutine
        def on_event(dts, g_reg, xact, xact_event, scratch_data):
            """ Group event callback: on INSTALL, re-instantiate any VLRs
            already present in the registration (tasklet restart case). """
            @asyncio.coroutine
            def instantiate_realloc_vlr(vlr):
                """Re-populate the virtual link information after restart

                Arguments:
                    vlr - the VirtualLinkRecord to re-instantiate
                """
                with self._dts.transaction(flags=0) as xact:
                    yield from vlr.instantiate(xact, 1)

            if xact_event == rwdts.MemberEvent.INSTALL:
                curr_cfg = self.regh.elements
                for cfg in curr_cfg:
                    vlr = self._vnsm.create_vlr(cfg)
                    self._loop.create_task(instantiate_realloc_vlr(vlr))

            self._log.debug("Got on_event")
            return rwdts.MemberRspCode.ACTION_OK

        @asyncio.coroutine
        def on_prepare(xact_info, action, ks_path, msg):
            """ prepare for VLR registration"""
            self._log.debug(
                "Got vlr on_prepare callback (xact_info: %s, action: %s): %s",
                xact_info, action, msg
                )

            if action == rwdts.QueryAction.CREATE:
                # Instantiate a new VLR and respond with its record.
                vlr = self._vnsm.create_vlr(msg)
                with self._dts.transaction(flags=0) as xact:
                    yield from vlr.instantiate(xact)
                self._log.debug("Responding to VL create request path:%s, msg:%s",
                                vlr.xpath, vlr.msg)
                xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath=vlr.xpath, msg=vlr.msg)
                return
            elif action == rwdts.QueryAction.DELETE:
                # Delete a VLR record
                schema = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.schema()
                path_entry = schema.keyspec_to_entry(ks_path)
                self._log.debug("Terminating VLR id %s", path_entry.key00.id)
                yield from self._vnsm.delete_vlr(path_entry.key00.id, xact_info.xact)
            else:
                err = "%s action on VirtualLinkRecord not supported" % action
                raise NotImplementedError(err)
            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
            return

        self._log.debug("Registering for VLR using xpath: %s",
                        VlrDtsHandler.XPATH)

        reg_handle = rift.tasklets.DTS.RegistrationHandler(
            on_commit=on_commit,
            on_prepare=on_prepare,
            )
        handlers = rift.tasklets.Group.Handler(on_event=on_event,)
        with self._dts.group_create(handler=handlers) as group:
            self._regh = group.register(
                xpath=VlrDtsHandler.XPATH,
                handler=reg_handle,
                flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ | rwdts.Flag.DATASTORE,
                )

    @asyncio.coroutine
    def create(self, xact, path, msg):
        """
        Create a VLR record in DTS with path and message
        """
        self._log.debug("Creating VLR xact = %s, %s:%s",
                        xact, path, msg)
        self.regh.create_element(path, msg)
        self._log.debug("Created VLR xact = %s, %s:%s",
                        xact, path, msg)

    @asyncio.coroutine
    def update(self, xact, path, msg):
        """
        Update a VLR record in DTS with path and message
        """
        self._log.debug("Updating VLR xact = %s, %s:%s",
                        xact, path, msg)
        self.regh.update_element(path, msg)
        self._log.debug("Updated VLR xact = %s, %s:%s",
                        xact, path, msg)

    @asyncio.coroutine
    def delete(self, xact, path):
        """
        Delete a VLR record in DTS with the given path
        """
        self._log.debug("Deleting VLR xact = %s, %s", xact, path)
        self.regh.delete_element(path)
        self._log.debug("Deleted VLR xact = %s, %s", xact, path)
+
+
class VldDtsHandler(object):
    """ DTS handler for the VLD registration (subscriber side) """
    XPATH = "C,/vld:vld-catalog/vld:vld"

    def __init__(self, dts, log, loop, vnsm):
        """
        @param dts  - DTS api handle
        @param log  - logger
        @param loop - asyncio event loop
        @param vnsm - VNS manager used to look up VLRs by VLD id
        """
        self._dts = dts
        self._log = log
        self._loop = loop
        self._vnsm = vnsm

        self._regh = None

    @property
    def regh(self):
        """ The registration handle associated with this Handler """
        return self._regh

    @asyncio.coroutine
    def register(self):
        """ Register the VLD path """
        @asyncio.coroutine
        def on_prepare(xact_info, query_action, ks_path, msg):
            """ prepare callback on vld path """
            # Fix: log the query action in the "action" placeholder; the
            # original passed msg there, producing a misleading log line.
            self._log.debug(
                "Got on prepare for VLD update (ks_path: %s) (action: %s)",
                ks_path.to_xpath(VldYang.get_schema()), query_action)

            schema = VldYang.YangData_Vld_VldCatalog_Vld.schema()
            path_entry = schema.keyspec_to_entry(ks_path)
            vld_id = path_entry.key00.id

            # Only DELETE/UPDATE need validation against live VLRs; any
            # other action is acknowledged immediately.
            disabled_actions = [rwdts.QueryAction.DELETE, rwdts.QueryAction.UPDATE]
            if query_action not in disabled_actions:
                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
                return

            vlr = self._vnsm.find_vlr_by_vld_id(vld_id)
            if vlr is None:
                self._log.debug(
                    "Did not find an existing VLR record for vld %s. "
                    "Permitting %s vld action", vld_id, query_action)
                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
                return

            raise VlrRecordExistsError(
                "Vlr record(s) exists."
                "Cannot perform %s action on VLD." % query_action)

        handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)

        yield from self._dts.register(
            VldDtsHandler.XPATH,
            flags=rwdts.Flag.SUBSCRIBER,
            handler=handler
            )
diff --git a/rwlaunchpad/plugins/rwvns/rwvnstasklet.py b/rwlaunchpad/plugins/rwvns/rwvnstasklet.py
new file mode 100755
index 0000000..1f1a044
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/rwvnstasklet.py
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwvnstasklet
+
+class Tasklet(rift.tasklets.rwvnstasklet.VnsTasklet):
+    """Plugin entry point; defers entirely to the site-packages
+    VnsTasklet implementation (RIFT-6485 workaround noted above)."""
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py b/rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py
new file mode 100644
index 0000000..86638f4
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py
@@ -0,0 +1,331 @@
+#!/bin/python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+from gi.repository import RwYang
+from xml.etree import ElementTree as etree
+import subprocess
+import logging
+
+from create_stackedl2topology import MyL2Network
+from create_stackedl2topology import MyL2Topology
+
+class MyNwNotFound(Exception):
+    """Raised when the underlying L2 host network cannot be located."""
+    pass
+
+class MyNodeNotFound(Exception):
+    """Raised when a required node is missing from the L2 topology."""
+    pass
+
+class MyTpNotFound(Exception):
+    """Raised when a required termination point is missing from the L2 topology."""
+    pass
+
class MyProvNetwork(object):
    """Builds the "ProviderNetwork-1" ietf-network on top of an L2 host
    topology, with helpers to create nodes, termination points and
    bidirectional links."""
    def __init__(self, nwtop, l2top, log):
        """
        @param nwtop - top-level ietf-network container to add networks to
        @param l2top - L2 host topology used for supporting-network refs
        @param log   - logger
        @raises MyNwNotFound when "L2HostNetwork-1" is absent from l2top
        """
        self.next_mac = 11
        self.log = log
        self.provnet1 = nwtop.network.add()
        self.provnet1.network_id = "ProviderNetwork-1"

        self.nwtop = nwtop
        self.l2top = l2top

        # L2 Network type augmentation
        self.provnet1.network_types.l2_network = self.provnet1.network_types.l2_network.new()
        # L2 Network augmentation
        self.provnet1.l2_network_attributes.name = "Rift LAB SFC-Demo Provider Network"
        ul_net = self.provnet1.supporting_network.add()
        try:
            ul_net.network_ref = l2top.find_nw_id("L2HostNetwork-1")
            self.l2netid = ul_net.network_ref
        except TypeError:
            # find_nw_id returned None -> assignment raises TypeError.
            raise MyNwNotFound()

    def get_nw_id(self, nw_name):
        """Return the network id matching nw_name, or None."""
        for nw in self.nwtop.network:
            if nw.network_id == nw_name:
                return nw.network_id

    def get_node(self, node_name):
        """Return this network's node named node_name, or None."""
        _node_id = "urn:Rift:Lab:" + node_name
        for node in self.provnet1.node:
            if node.node_id == _node_id:
                return node

    def get_tp(self, node, tp_name):
        """Return the termination point tp_name on node, or None."""
        _tp_id = node.node_id + ":" + tp_name
        for tp in node.termination_point:
            if tp.tp_id == _tp_id:
                return tp

    def get_link(self, link_name):
        """Return this network's link named link_name, or None."""
        # Fix: the original iterated over an undefined name "nw"
        # (NameError at runtime); search this network's own links.
        for link in self.provnet1.link:
            if link.l2_link_attributes.name == link_name:
                return link

    def create_node(self, node_name, description, mgmt_ip_addr = None, sup_node = None):
        """Add a node; optionally attach a management address and a
        supporting node from the L2 topology."""
        logging.debug("Creating node %s", node_name)
        node = self.provnet1.node.add()
        node.node_id = "urn:Rift:Lab:" + node_name
        # L2 Node augmentation
        node.l2_node_attributes.name = node_name
        node.l2_node_attributes.description = description
        if mgmt_ip_addr is not None:
            node.l2_node_attributes.management_address.append(mgmt_ip_addr)
        if sup_node is not None:
            logging.debug("  Adding support node %s", sup_node.node_id)
            ul_node = node.supporting_node.add()
            ul_node.network_ref = self.l2netid
            ul_node.node_ref = sup_node.node_id
        return node

    def create_tp(self, node, cfg_tp, sup_node = None, sup_tp = None, vlan = False):
        """Add a termination point named cfg_tp to node with a
        locally-unique MAC; vlan selects l2t:vlan encapsulation."""
        logging.debug("   Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp)
        tp = node.termination_point.add()
        tp.tp_id = ("{}:{}").format(node.node_id, cfg_tp)
        # L2 TP augmentation
        tp.l2_termination_point_attributes.description = cfg_tp
        tp.l2_termination_point_attributes.maximum_frame_size = 1500
        tp.l2_termination_point_attributes.mac_address = "00:4f:9c:ab:dd:" + str(self.next_mac)
        self.next_mac = self.next_mac + 1
        # Fix: idiomatic truthiness test (was "vlan == True").
        if vlan:
            tp.l2_termination_point_attributes.eth_encapsulation = "l2t:vlan"
        else:
            tp.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
        if (sup_tp is not None) and (sup_node is not None):
            # Fix: log message typo ("terminaton").
            logging.debug("     Adding support termination point %s", sup_tp.tp_id)
            ul_tp = tp.supporting_termination_point.add()
            ul_tp.network_ref = self.l2netid
            ul_tp.node_ref = sup_node.node_id
            ul_tp.tp_ref = sup_tp.tp_id
        return tp

    def create_bidir_link(self, node1, tp1, node2, tp2, link_name1, link_name2):
        """Add a pair of unidirectional point-to-point links between
        (node1, tp1) and (node2, tp2); returns both link objects."""
        logging.debug("Creating links %s %s", link_name1, link_name2)
        lnk1 = self.provnet1.link.add()
        lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description)
        lnk1.source.source_node = node1.node_id
        lnk1.source.source_tp = tp1.tp_id
        lnk1.destination.dest_node = node2.node_id
        lnk1.destination.dest_tp = tp2.tp_id
        # L2 link augmentation
        lnk1.l2_link_attributes.name = link_name1
        #lnk1.l2_link_attributes.rate = 1000000000.00

        lnk2 = self.provnet1.link.add()
        lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description)
        lnk2.source.source_node = node2.node_id
        lnk2.source.source_tp = tp2.tp_id
        lnk2.destination.dest_node = node1.node_id
        lnk2.destination.dest_tp = tp1.tp_id
        # L2 link augmentation
        lnk2.l2_link_attributes.name = link_name2
        #lnk2.l2_link_attributes.rate = 1000000000.00
        return lnk1, lnk2
+
class MyProvTopology(MyProvNetwork):
    """Populates the provider network: OVS bridge nodes, termination
    points and management links for the SFC demo lab hosts."""
    def __init__(self, nwtop, l2top, log):
        super(MyProvTopology, self).__init__(nwtop, l2top, log)

    def find_nw_id(self, nw_name):
        """Return the network id matching nw_name, or None."""
        return self.get_nw_id(nw_name)

    def find_node(self, node_name):
        """Return the provider-network node named node_name, or None."""
        return self.get_node(node_name)

    def find_tp(self, node, tp_name):
        """Return the termination point tp_name on node, or None."""
        return self.get_tp(node, tp_name)

    def find_link(self, link_name):
        """Return the provider-network link named link_name, or None."""
        return self.get_link(link_name)

    def setup_nodes(self):
        """Create pseudo LAN nodes and the per-host OVS bridge nodes.

        @raises MyNodeNotFound when a grunt host is missing from the L2
                topology
        """
        logging.debug("Setting up nodes")
        self.pseudo_mgmt_node = self.create_node("Pseudo_mgmt_node", "Pseudo node for VM mgmt network LAN")
        self.pseudo_dp_node = self.create_node("Pseudo_DP_node", "Pseudo node for DP network LAN")

        self.g118_node = self.l2top.find_node("Grunt118")
        if self.g118_node is None:
            raise MyNodeNotFound()
        self.g44_node = self.l2top.find_node("Grunt44")
        if self.g44_node is None:
            raise MyNodeNotFound()
        self.g120_node = self.l2top.find_node("Grunt120")
        if self.g120_node is None:
            raise MyNodeNotFound()

        self.g118_br_int = self.create_node("G118_Br_Int", "OVS Integration bridge on Grunt118", mgmt_ip_addr="10.66.4.118", sup_node=self.g118_node)
        self.g118_br_eth1 = self.create_node("G118_Br_Eth1", "OVS Integration bridge on Grunt118", mgmt_ip_addr="10.66.4.118", sup_node=self.g118_node)
        # eth2 on g118 is being used in PCI passthrough mode

        self.g44_br_int = self.create_node("G44_Br_Int", "OVS Integration bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node=self.g44_node)
        self.g44_br_eth1 = self.create_node("G44_Br_Eth1", "OVS Interface bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node=self.g44_node)
        self.g44_br_eth2 = self.create_node("G44_Br_Eth2", "OVS Interface bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node=self.g44_node)
        self.g44_br_eth3 = self.create_node("G44_Br_Eth3", "OVS Interface bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node=self.g44_node)

        self.g120_br_int = self.create_node("G120_Br_Int", "OVS Integration bridge on Grunt120", mgmt_ip_addr="10.66.4.120", sup_node=self.g120_node)
        self.g120_br_eth1 = self.create_node("G120_Br_Eth1", "OVS Integration bridge on Grunt120", mgmt_ip_addr="10.66.4.120", sup_node=self.g120_node)
        # eth2 on g120 is being used in PCI passthrough mode

    def setup_tps(self):
        """Create termination points on the bridge and pseudo nodes.

        @raises MyTpNotFound when an underlying L2 TP is missing
        """
        logging.debug("Setting up termination points")
        self.g118_e1 = self.l2top.find_tp(self.g118_node, "eth1")
        if self.g118_e1 is None:
            raise MyTpNotFound()
        self.g44_e1 = self.l2top.find_tp(self.g44_node, "eth1")
        if self.g44_e1 is None:
            raise MyTpNotFound()
        self.g44_e2 = self.l2top.find_tp(self.g44_node, "eth2")
        if self.g44_e2 is None:
            raise MyTpNotFound()
        self.g44_e3 = self.l2top.find_tp(self.g44_node, "eth3")
        if self.g44_e3 is None:
            raise MyTpNotFound()
        self.g120_e1 = self.l2top.find_tp(self.g120_node, "eth1")
        # Fix: the original re-checked self.g44_e3 here (copy/paste),
        # leaving a missing Grunt120 eth1 TP undetected.
        if self.g120_e1 is None:
            raise MyTpNotFound()

        self.g118_br_int_eth1 = self.create_tp(self.g118_br_int, "int-br-eth1")
        self.g118_br_int_tap1 = self.create_tp(self.g118_br_int, "tap1")

        self.g118_br_eth1_phyeth1 = self.create_tp(self.g118_br_eth1, "phyeth1")
        self.g118_br_eth1_eth1 = self.create_tp(self.g118_br_eth1, "eth1", sup_node=self.g118_node, sup_tp=self.g118_e1, vlan=True)

        self.g44_br_int_eth1 = self.create_tp(self.g44_br_int, "int-br-eth1")
        self.g44_br_int_vhu1 = self.create_tp(self.g44_br_int, "vhu1")
        self.g44_br_int_eth2 = self.create_tp(self.g44_br_int, "int-br-eth2")
        self.g44_br_int_vhu2 = self.create_tp(self.g44_br_int, "vhu2")
        # Fix: the int-br-eth3/vhu3 TPs were stored into the *_eth1/*_vhu1
        # attributes (copy/paste), silently overwriting them.
        self.g44_br_int_eth3 = self.create_tp(self.g44_br_int, "int-br-eth3")
        self.g44_br_int_vhu3 = self.create_tp(self.g44_br_int, "vhu3")

        self.g44_br_eth1_phyeth1 = self.create_tp(self.g44_br_eth1, "phyeth1")
        self.g44_br_eth1_dpdk0 = self.create_tp(self.g44_br_eth1, "dpdk0", sup_node=self.g44_node, sup_tp=self.g44_e1, vlan=True)

        self.g44_br_eth2_phyeth1 = self.create_tp(self.g44_br_eth2, "phyeth2")
        self.g44_br_eth2_dpdk1 = self.create_tp(self.g44_br_eth2, "dpdk1", sup_node=self.g44_node, sup_tp=self.g44_e2)

        self.g44_br_eth3_phyeth1 = self.create_tp(self.g44_br_eth3, "phyeth3")
        self.g44_br_eth3_dpdk2 = self.create_tp(self.g44_br_eth3, "dpdk2", sup_node=self.g44_node, sup_tp=self.g44_e3)

        self.g120_br_int_eth1 = self.create_tp(self.g120_br_int, "int-br-eth1")
        self.g120_br_int_tap1 = self.create_tp(self.g120_br_int, "tap1")

        self.g120_br_eth1_phyeth1 = self.create_tp(self.g120_br_eth1, "phyeth1")
        self.g120_br_eth1_eth1 = self.create_tp(self.g120_br_eth1, "eth1", sup_node=self.g120_node, sup_tp=self.g120_e1, vlan=True)

        self.pmn_eth1 = self.create_tp(self.pseudo_mgmt_node, "eth1")
        self.pmn_eth2 = self.create_tp(self.pseudo_mgmt_node, "eth2")
        self.pmn_eth3 = self.create_tp(self.pseudo_mgmt_node, "eth3")

    def setup_links(self):
        # Add links to provnet1 network
        # These links are unidirectional and point-to-point
        logging.debug("Setting up links")
        # Bidir Links for OVS bridges
        self.create_bidir_link(self.g118_br_eth1, self.g118_br_eth1_eth1, self.pseudo_mgmt_node, self.pmn_eth1, "Link_g118_be1_pmn_e1", "Link_pmn_e1_g118_be1")
        self.create_bidir_link(self.g44_br_eth1, self.g44_br_eth1_dpdk0, self.pseudo_mgmt_node, self.pmn_eth2, "Link_g44_be1_pmn_d0", "Link_pmn_e2_g44_d0")
        self.create_bidir_link(self.g120_br_eth1, self.g120_br_eth1_eth1, self.pseudo_mgmt_node, self.pmn_eth3, "Link_g120_be1_pmn_e3", "Link_pmn_e3_g120_be1")
        # Data path links cannot be represented here since PCI pass through is being used on G118 and G44

    def setup_all(self):
        """Build the full provider topology: nodes, TPs, then links."""
        self.setup_nodes()
        self.setup_tps()
        self.setup_links()
+
def adjust_xml_file(infile, outfile, begin_marker, end_marker):
    """Copy infile to outfile, moving the marker-delimited block down.

    Lines from the one containing begin_marker through the one containing
    end_marker (inclusive) are held back and re-emitted only after the next
    line following the block has been written — i.e. the block is swapped
    with the single line that follows it. All other lines are copied
    unchanged. Diagnostic progress is printed to stdout.

    Fixes over the original:
      * held-back lines are flushed at EOF instead of being silently
        dropped when the file ends inside the post-block copy window
      * the local "buffer" no longer shadows the builtin of that name
    """
    held_lines = []
    in_block = False
    max_interesting_line_toread = 1
    interesting_line = 0
    with open(infile) as inf:
        with open(outfile, 'w') as outf:
            for line in inf:
                if begin_marker in line:
                    in_block = True
                    # Go down
                if end_marker in line:
                    assert in_block is True
                    print("End of gathering line...", line)
                    held_lines.append(line)  # gather lines
                    interesting_line = max_interesting_line_toread
                    in_block = False
                    continue
                if interesting_line:
                    print("Interesting line printing ...", line)
                    outf.write(line)
                    interesting_line -= 1
                    if interesting_line == 0:  # output gathered lines
                        outf.writelines(held_lines)
                        held_lines = []  # empty buffer
                        print("\n\n")
                    continue

                if in_block:
                    print("Gathering line...", line)
                    held_lines.append(line)  # gather lines
                else:
                    outf.write(line)

            # Fix: don't lose gathered lines when the input ends before
            # the post-block copy window completes.
            if held_lines:
                outf.writelines(held_lines)
+
+
+if __name__ == "__main__":
+    model = RwYang.Model.create_libncx()
+    model.load_schema_ypbc(RwTl.get_schema())
+    # create logger 
+    logger = logging.getLogger('Provider Network Topology')
+    logger.setLevel(logging.DEBUG)
+    logging.basicConfig(level=logging.DEBUG)
+
+    logger.info('Creating an instance of Provider Network Topology')
+
+    nwtop = RwTl.YangData_IetfNetwork()
+
+    # Setup L2 topology
+    l2top = MyL2Topology(nwtop, logger)
+    l2top.setup_all()
+
+    # Setup Provider network topology
+    provtop = MyProvTopology(nwtop, l2top, logger)
+    provtop.setup_all()
+
+    print ("Converting to XML")
+    # Convert l2nw network to XML
+    xml_str = nwtop.to_xml_v2(model)
+    tree = etree.XML(xml_str)
+    xml_file = "/tmp/stacked_provtop.xml"
+    xml_formatted_file = "/tmp/stacked_provtop2.xml"
+    with open(xml_file, "w") as f:
+        f.write(xml_str)
+    status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True)
+
+    status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True)
+
+    print ("Converting to JSON ")
+    # Convert set of topologies to JSON
+    json_str = nwtop.to_json(model)
+    with open("/tmp/stacked_provtop.json", "w") as f:
+        f.write(json_str)
+    status = subprocess.call("python -m json.tool /tmp/stacked_provtop.json > /tmp/stacked_provtop2.json", shell=True)
+    json_formatted_file = "/tmp/stacked_provtop2.json"
+    status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True)
+    status = subprocess.call("sed -i -e 's/\"l2t:vlan\"/\"vlan\"/g' " + json_formatted_file, shell=True)
diff --git a/rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py b/rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py
new file mode 100644
index 0000000..a27a0b9
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py
@@ -0,0 +1,277 @@
+#!/bin/python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+from gi.repository import RwYang
+from xml.etree import ElementTree as etree
+import subprocess
+import logging
+
+from create_stackedl2topology import MyL2Network
+from create_stackedl2topology import MyL2Topology
+from create_stackedProvNettopology import MyProvNetwork
+from create_stackedProvNettopology import MyProvTopology
+from create_stackedVMNettopology import MyVMNetwork
+from create_stackedVMNettopology import MyVMTopology
+
+
+class MyNwNotFound(Exception):
+    """Raised when a referenced network cannot be found."""
+    pass
+
+class MyNodeNotFound(Exception):
+    """Raised when a referenced node cannot be found."""
+    pass
+
+class MyTpNotFound(Exception):
+    """Raised when a referenced termination point cannot be found."""
+    pass
+
+class MySfcNetwork(object):
+    def __init__(self, nwtop, l2top, provtop, vmtop, log):
+        self.next_mac = 81
+        self.log = log
+        self.sfcnet1 = nwtop.network.add()
+        self.sfcnet1.network_id = "SfcNetwork-1"
+
+        self.l2top = l2top
+        self.provtop = provtop
+        self.vmtop = vmtop
+
+        # L2 Network type augmentation
+        self.sfcnet1.network_types.l2_network = self.sfcnet1.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.sfcnet1.l2_network_attributes.name = "Rift LAB SFC-Demo SFC Network"
+        try:
+           self.l2netid = l2top.find_nw_id("L2HostNetwork-1")
+        except TypeError:
+           raise MyNwNotFound()
+        ul_net = self.sfcnet1.supporting_network.add()
+        try:
+           ul_net.network_ref = provtop.find_nw_id("ProviderNetwork-1")
+           self.provnetid = ul_net.network_ref
+        except TypeError:
+           raise MyNwNotFound()
+        ul_net = self.sfcnet1.supporting_network.add()
+        try:
+           ul_net.network_ref = vmtop.find_nw_id("VmNetwork-1")
+           self.vmnetid = ul_net.network_ref
+        except TypeError:
+           raise MyNwNotFound()
+
+    def get_nw_id(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw.network_id
+
+    def get_node(self, node_name):
+        _node_id = "urn:Rift:Lab:" + node_name
+        for node in self.sfcnet1.node:
+            if (node.node_id == _node_id):
+                return node
+
+    def get_tp(self, node, tp_name):
+        _tp_id = "urn:Rift:Lab:" + node.node_id + "_" + tp_name
+        for tp in node.termination_point :
+            if (tp.tp_id == _tp_id):
+                return tp
+
+    def get_link(self, link_name):
+        for link in nw.link :
+            if (link.l2_link_attributes.name == link_name):
+                return link
+
+    def create_node(self, node_name, description, mgmt_ip_addr = None, sup_node = None, nw_ref = None):
+        logging.debug("Creating node %s", node_name)
+        node = self.sfcnet1.node.add()
+        node.node_id = "urn:Rift:Lab:" + node_name
+        # L2 Node augmentation
+        node.l2_node_attributes.name = node_name
+        node.l2_node_attributes.description = description
+        if (mgmt_ip_addr is not None):
+            node.l2_node_attributes.management_address.append(mgmt_ip_addr)
+        if (sup_node is not None):
+            logging.debug("  Adding support node %s", sup_node.node_id)
+            ul_node = node.supporting_node.add()
+            if (nw_ref is not None):
+                ul_node.network_ref = nw_ref
+            else:
+                ul_node.network_ref = self.l2netid
+            ul_node.node_ref = sup_node.node_id
+        return node
+
+    def create_tp(self, node, cfg_tp, sup_node = None, sup_tp = None, nw_ref = None):
+        logging.debug("   Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp)
+        tp = node.termination_point.add()
+        tp.tp_id = ("{}:{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        #tp.l2_termination_point_attributes.mac_address = "00:5e:8a:ab:dd:" + str(self.next_mac)
+        #self.next_mac = self.next_mac + 1
+        tp.l2_termination_point_attributes.eth_encapsulation = "l2t:vxlan"
+        if ((sup_tp is not None) and (sup_node is not None)):
+            logging.debug("     Adding support terminaton point %s", sup_tp.tp_id)
+            ul_tp = tp.supporting_termination_point.add()
+            if (nw_ref is not None):
+                ul_tp.network_ref = nw_ref
+            else:
+                ul_tp.network_ref = self.l2netid
+            ul_tp.node_ref = sup_node.node_id
+            ul_tp.tp_ref = sup_tp.tp_id
+        return tp
+
+    def create_link(self, node1, tp1, node2, tp2, link_name1, link_name2 = None):
+        logging.debug("Creating links %s %s", link_name1, link_name2)
+        lnk1= self.sfcnet1.link.add()
+        lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description)
+        lnk1.source.source_node = node1.node_id
+        lnk1.source.source_tp = tp1.tp_id
+        lnk1.destination.dest_node = node2.node_id
+        lnk1.destination.dest_tp = tp2.tp_id
+        # L2 link augmentation
+        lnk1.l2_link_attributes.name = link_name1
+        lnk1.l2_link_attributes.rate = 1000000000.00
+
+        # Create bidir link if second link is provided
+        if (link_name2 is not None):
+            lnk2= self.sfcnet1.link.add()
+            lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description)
+            lnk2.source.source_node = node2.node_id
+            lnk2.source.source_tp = tp2.tp_id
+            lnk2.destination.dest_node = node1.node_id
+            lnk2.destination.dest_tp = tp1.tp_id
+            # L2 link augmentation
+            lnk2.l2_link_attributes.name = link_name2
+            lnk2.l2_link_attributes.rate = 1000000000.00
+
+
+class MySfcTopology(MySfcNetwork):
+    """Populates the SFC network with the concrete lab nodes and
+    termination points (classifier, forwarder and service function)."""
+
+    def __init__(self, nwtop, l2top, provtop, vmnet, log):
+        super(MySfcTopology, self).__init__(nwtop, l2top, provtop, vmnet, log)
+
+    def find_nw_id(self, nw_name):
+        """Public lookup wrapper around get_nw_id()."""
+        return self.get_nw_id(nw_name)
+
+    def find_node(self, node_name):
+        """Public lookup wrapper around get_node()."""
+        return self.get_node(node_name)
+
+    def find_tp(self, node, tp_name):
+        """Public lookup wrapper around get_tp()."""
+        return self.get_tp(node, tp_name)
+
+    def find_link(self, link_name):
+        """Public lookup wrapper around get_link()."""
+        return self.get_link(link_name)
+
+    def setup_nodes(self):
+        """Create SF/SFC/SFF nodes, each supported by a node resolved
+        from the VM or provider topology.  Raises MyNodeNotFound when a
+        supporting node is missing."""
+        logging.debug("Setting up nodes")
+
+        self.tg_node = self.vmtop.find_node("Trafgen_VM")
+        if (self.tg_node is None):
+           raise MyNodeNotFound()
+        self.lb_node = self.vmtop.find_node("LB_VM")
+        if (self.lb_node is None):
+           raise MyNodeNotFound()
+
+        self.g44_br_int_node = self.provtop.find_node("G44_Br_Int")
+        if (self.g44_br_int_node is None):
+           raise MyNodeNotFound()
+
+        self.sf1 = self.create_node("SF1","SF on LB VM", sup_node = self.lb_node, nw_ref = self.vmnetid)
+        self.sfc1 = self.create_node("SFC1","SF classifier on Trafgen VM", sup_node = self.tg_node, nw_ref = self.vmnetid)
+        self.sff1 = self.create_node("SFF1","SF forwarder on Grunt44 OVS integration bridge", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_br_int_node, nw_ref = self.provnetid)
+
+    def setup_tps(self):
+        """Create the vxlan-nsh termination points on each SFC node."""
+        logging.debug("Setting up termination points")
+        # FInd L2 hosts
+        #self.g44_e2 = self.l2top.find_tp(self.g44_node, "eth2")
+        #if (self.g44_e2 is None):
+        #   raise MyTpNotFound()
+
+        self.sfc1_vxlannsh1 = self.create_tp(self.sfc1, "vxlannsh1")
+        self.sf1_vxlannsh1 = self.create_tp(self.sf1, "vxlannsh1")
+        self.sff1_vxlannsh1 = self.create_tp(self.sff1, "vxlannsh1")
+
+
+    def setup_links(self):
+        # Add links to sfcnet1 network
+        # These links are unidirectional and point-to-point
+        logging.debug("Setting up links")
+        # Bidir Links for OVS bridges
+        self.create_link(self.sfc1, self.sfc1_vxlannsh1, self.sff1, self.sff1_vxlannsh1, "Link_sfc1_sff1")
+        self.create_link(self.sfc1, self.sfc1_vxlannsh1, self.sf1, self.sf1_vxlannsh1, "Link_sff1_sf1", "Link_sf1_sff1")
+
+    def setup_all(self):
+        """Build nodes and termination points.  NOTE(review): link
+        creation is deliberately commented out here — confirm intended."""
+        self.setup_nodes()
+        self.setup_tps()
+        #self.setup_links()
+
+
+if __name__ == "__main__":
+    model = RwYang.Model.create_libncx()
+    model.load_schema_ypbc(RwTl.get_schema())
+    # create logger 
+    logger = logging.getLogger('SFC Network Topology')
+    logger.setLevel(logging.DEBUG)
+    logging.basicConfig(level=logging.DEBUG)
+
+    logger.info('Creating an instance of SFC Network Topology')
+
+    nwtop = RwTl.YangData_IetfNetwork()
+
+    # Setup L2 topology
+    l2top = MyL2Topology(nwtop, logger)
+    l2top.setup_all()
+
+    # Setup Provider network topology
+    provtop = MyProvTopology(nwtop, l2top, logger)
+    provtop.setup_all()
+
+    # Setup VM network topology
+    vmtop = MyVMTopology(nwtop, l2top, provtop, logger)
+    vmtop.setup_all()
+
+    # Setup SFC network topology
+    sfctop = MySfcTopology(nwtop, l2top, provtop, vmtop, logger)
+    sfctop.setup_all()
+
+    print ("Converting to XML")
+    # Convert l2nw network to XML
+    xml_str = nwtop.to_xml_v2(model)
+    tree = etree.XML(xml_str)
+    xml_file = "/tmp/stacked_sfctop.xml"
+    xml_formatted_file = "/tmp/stacked_sfctop2.xml"
+    with open(xml_file, "w") as f:
+        f.write(xml_str)
+    status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True)
+
+    status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True)
+
+    print ("Converting to JSON ")
+    # Convert set of topologies to JSON
+    json_str = nwtop.to_json(model)
+    with open("/tmp/stacked_sfctop.json", "w") as f:
+        f.write(json_str)
+    status = subprocess.call("python -m json.tool /tmp/stacked_sfctop.json > /tmp/stacked_sfctop2.json", shell=True)
+    json_formatted_file = "/tmp/stacked_sfctop2.json"
+    status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True)
+    status = subprocess.call("sed -i -e 's/\"l2t:vlan\"/\"vlan\"/g' " + json_formatted_file, shell=True)
+    status = subprocess.call("sed -i -e 's/\"l2t:vxlan\"/\"vxlan\"/g' " + json_formatted_file, shell=True)
+
diff --git a/rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py b/rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py
new file mode 100644
index 0000000..99f5898
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py
@@ -0,0 +1,332 @@
+#!/bin/python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+from gi.repository import RwYang
+from xml.etree import ElementTree as etree
+import subprocess
+import logging
+
+from create_stackedl2topology import MyL2Network
+from create_stackedl2topology import MyL2Topology
+from create_stackedProvNettopology import MyProvNetwork
+from create_stackedProvNettopology import MyProvTopology
+
+class MyNwNotFound(Exception):
+    """Raised when a referenced network cannot be found."""
+    pass
+
+class MyNodeNotFound(Exception):
+    """Raised when a referenced node cannot be found."""
+    pass
+
+class MyTpNotFound(Exception):
+    """Raised when a referenced termination point cannot be found."""
+    pass
+
+class MyVMNetwork(object):
+    def __init__(self, nwtop, l2top, provtop, log):
+        self.next_mac = 41
+        self.log = log
+        self.vmnet1 = nwtop.network.add()
+        self.vmnet1.network_id = "VmNetwork-1"
+
+        self.nwtop = nwtop
+        self.l2top = l2top
+        self.provtop = provtop
+
+        # L2 Network type augmentation
+        self.vmnet1.network_types.l2_network = self.vmnet1.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.vmnet1.l2_network_attributes.name = "Rift LAB SFC-Demo VM Network"
+        ul_net = self.vmnet1.supporting_network.add()
+        try:
+           ul_net.network_ref = l2top.find_nw_id("L2HostNetwork-1")
+           self.l2netid = ul_net.network_ref
+        except TypeError:
+           raise MyNwNotFound()
+        ul_net = self.vmnet1.supporting_network.add()
+        try:
+           ul_net.network_ref = provtop.find_nw_id("ProviderNetwork-1")
+           self.provnetid = ul_net.network_ref
+        except TypeError:
+           raise MyNwNotFound()
+
+    def get_nw_id(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw.network_id
+
+    def get_node(self, node_name):
+        _node_id = "urn:Rift:Lab:" + node_name
+        for node in self.vmnet1.node:
+            if (node.node_id == _node_id):
+                return node
+
+    def get_tp(self, node, tp_name):
+        _tp_id = node.node_id + "_" + tp_name
+        for tp in node.termination_point :
+            if (tp.tp_id == _tp_id):
+                return tp
+
+    def get_link(self, link_name):
+        for link in nw.link :
+            if (link.l2_link_attributes.name == link_name):
+                return link
+
+    def create_node(self, node_name, description, mgmt_ip_addr=None, sup_node_list=None):
+        logging.debug("Creating node %s", node_name)
+        node = self.vmnet1.node.add()
+        node.node_id = "urn:Rift:Lab:" + node_name
+        # L2 Node augmentation
+        node.l2_node_attributes.name = node_name
+        node.l2_node_attributes.description = description
+        if (mgmt_ip_addr is not None):
+            node.l2_node_attributes.management_address.append(mgmt_ip_addr)
+        if (sup_node_list is not None):
+            for sup_node in sup_node_list:
+                logging.debug("  Adding support node %s", sup_node[0].node_id)
+                ul_node = node.supporting_node.add()
+                # Second element is hardcoded as nw ref
+                if (sup_node[1] is not None):
+                    ul_node.network_ref = sup_node[1]
+                else:
+                    ul_node.network_ref = self.l2netid
+                ul_node.node_ref = sup_node[0].node_id
+        return node
+
+    def create_tp(self, node, cfg_tp, sup_node = None, sup_tp = None, nw_ref = None):
+        logging.debug("   Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp)
+        tp = node.termination_point.add()
+        tp.tp_id = ("{}:{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        tp.l2_termination_point_attributes.mac_address = "00:5e:8a:ab:cc:" + str(self.next_mac)
+        self.next_mac = self.next_mac + 1
+        tp.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
+        if ((sup_tp is not None) and (sup_node is not None)):
+            logging.debug("     Adding support terminaton point %s", sup_tp.tp_id)
+            ul_tp = tp.supporting_termination_point.add()
+            if (nw_ref is not None):
+                ul_tp.network_ref = nw_ref
+            else:
+                ul_tp.network_ref = self.l2netid
+            ul_tp.node_ref = sup_node.node_id
+            ul_tp.tp_ref = sup_tp.tp_id
+        return tp
+
+    def create_bidir_link(self, node1, tp1, node2, tp2, link_name1, link_name2):
+        logging.debug("Creating links %s %s", link_name1, link_name2)
+        lnk1= self.vmnet1.link.add()
+        lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description)
+        lnk1.source.source_node = node1.node_id
+        lnk1.source.source_tp = tp1.tp_id
+        lnk1.destination.dest_node = node2.node_id
+        lnk1.destination.dest_tp = tp2.tp_id
+        # L2 link augmentation
+        lnk1.l2_link_attributes.name = link_name1
+        #lnk1.l2_link_attributes.rate = 1000000000.00
+
+        lnk2= self.vmnet1.link.add()
+        lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description)
+        lnk2.source.source_node = node2.node_id
+        lnk2.source.source_tp = tp2.tp_id
+        lnk2.destination.dest_node = node1.node_id
+        lnk2.destination.dest_tp = tp1.tp_id
+        # L2 link augmentation
+        lnk2.l2_link_attributes.name = link_name2
+        #lnk2.l2_link_attributes.rate = 1000000000.00
+        return lnk1, lnk2
+
+class MyVMTopology(MyVMNetwork):
+    """Populates the VM network with the concrete lab VMs (trafgen,
+    load balancer, trafsink and a pseudo management VM)."""
+
+    def __init__(self, nwtop, l2top, provtop, log):
+        super(MyVMTopology, self).__init__(nwtop, l2top, provtop, log)
+
+    def find_nw_id(self, nw_name):
+        """Public lookup wrapper around get_nw_id()."""
+        return self.get_nw_id(nw_name)
+
+    def find_node(self, node_name):
+        """Public lookup wrapper around get_node()."""
+        return self.get_node(node_name)
+
+    def find_tp(self, node, tp_name):
+        """Public lookup wrapper around get_tp()."""
+        return self.get_tp(node, tp_name)
+
+   
+    def find_link(self, link_name):
+        """Public lookup wrapper around get_link()."""
+        return self.get_link(link_name)
+
+    def setup_nodes(self):
+        """Create the VM nodes, each supported by hosts from the L2
+        topology and the provider bridge.  Raises MyNodeNotFound when a
+        supporting node is missing."""
+        logging.debug("Setting up nodes")
+
+        self.g118_node = self.l2top.find_node("Grunt118")
+        if (self.g118_node is None):
+           raise MyNodeNotFound()
+        self.g44_node = self.l2top.find_node("Grunt44")
+        if (self.g44_node is None):
+           raise MyNodeNotFound()
+        self.g120_node = self.l2top.find_node("Grunt120")
+        if (self.g120_node is None):
+           raise MyNodeNotFound()
+
+        self.g44_br_int_node = self.provtop.find_node("G44_Br_Int")
+        if (self.g44_br_int_node is None):
+           raise MyNodeNotFound()
+
+        self.pseudo_vm = self.create_node("Pseudo_VM","Pseudo VM to manage eth0 LAN")
+        sup_node_list = [[self.g118_node, self.l2netid], [self.g44_br_int_node, self.provnetid]]
+        self.tg_vm = self.create_node("Trafgen_VM","Trafgen VM on Grunt118", mgmt_ip_addr="10.0.118.3", sup_node_list = sup_node_list)
+        sup_node_list = [[self.g44_node, self.l2netid], [self.g44_br_int_node, self.provnetid]]
+        self.lb_vm = self.create_node("LB_VM","LB VM on Grunt44", mgmt_ip_addr="10.0.118.35", sup_node_list = sup_node_list)
+        sup_node_list = [[self.g120_node, self.l2netid], [self.g44_br_int_node, self.provnetid]]
+        self.ts_vm = self.create_node("Trafsink_VM","Trafsink VM on Grunt120", mgmt_ip_addr="10.0.118.4", sup_node_list = sup_node_list)
+
+    def setup_tps(self):
+        """Create VM termination points, wiring them to the supporting
+        host NICs / OVS vhost-user ports found in the lower layers."""
+        logging.debug("Setting up termination points")
+        # FInd L2 hosts
+        self.g118_e2 = self.l2top.find_tp(self.g118_node, "eth2")
+        if (self.g118_e2 is None):
+           raise MyTpNotFound()
+        self.g44_e2 = self.l2top.find_tp(self.g44_node, "eth2")
+        if (self.g44_e2 is None):
+           raise MyTpNotFound()
+        # Find OVS tps
+        self.g44_br_int_vhu2 = self.provtop.find_tp(self.g44_br_int_node, "vhu2")
+        if (self.g44_br_int_vhu2 is None):
+           raise MyTpNotFound()
+        self.g44_br_int_vhu3 = self.provtop.find_tp(self.g44_br_int_node, "vhu3")
+        if (self.g44_br_int_vhu3 is None):
+           raise MyTpNotFound()
+
+        self.pvm_eth1 = self.create_tp(self.pseudo_vm, "eth1") 
+        self.pvm_eth2 = self.create_tp(self.pseudo_vm, "eth2") 
+        self.pvm_eth3 = self.create_tp(self.pseudo_vm, "eth3") 
+
+        self.tg_vm_eth0 = self.create_tp(self.tg_vm, "eth0")
+        self.tg_vm_trafgen11 = self.create_tp(self.tg_vm, "trafgen11", sup_node=self.g118_node, sup_tp=self.g118_e2)
+
+        self.lb_vm_eth0 = self.create_tp(self.lb_vm, "eth0")
+        self.lb_vm_lb21 = self.create_tp(self.lb_vm, "load_balancer21", sup_node=self.g44_br_int_node, sup_tp=self.g44_br_int_vhu2, nw_ref=self.provnetid)
+        self.lb_vm_lb22 = self.create_tp(self.lb_vm, "load_balancer22", sup_node=self.g44_br_int_node, sup_tp=self.g44_br_int_vhu3, nw_ref=self.provnetid)
+
+        self.ts_vm_eth0 = self.create_tp(self.ts_vm, "eth0")
+        # NOTE(review): trafsink runs on Grunt120, but the support node/tp
+        # here are g44_node/g44_e2 — looks like a copy/paste; confirm
+        # whether g120_node/g120_e2 was intended.
+        self.ts_vm_trafsink31 = self.create_tp(self.ts_vm, "trafsink31", sup_node=self.g44_node, sup_tp=self.g44_e2)
+
+
+    def setup_links(self):
+        # Add links to vmnet1 network
+        # These links are unidirectional and point-to-point
+        logging.debug("Setting up links")
+        # Bidir Links for OVS bridges
+        self.create_bidir_link(self.tg_vm, self.tg_vm_trafgen11, self.lb_vm, self.lb_vm_lb21, "Link_tg_t11_lb_lb21", "Link_lb_lb21_tg_t11")
+        self.create_bidir_link(self.ts_vm, self.ts_vm_trafsink31, self.lb_vm, self.lb_vm_lb22, "Link_ts_t31_lb_lb22", "Link_lb_lb22_tg_t31")
+
+        self.create_bidir_link(self.pseudo_vm, self.pvm_eth1, self.tg_vm, self.tg_vm_eth0, "Link_pvm_e1_tgv_e0", "Link_tgv_e0_pvm_e1")
+        self.create_bidir_link(self.pseudo_vm, self.pvm_eth2, self.lb_vm, self.lb_vm_eth0, "Link_pvm_e2_lbv_e0", "Link_lbv_e0_pvm_e2")
+        self.create_bidir_link(self.pseudo_vm, self.pvm_eth3, self.ts_vm, self.ts_vm_eth0, "Link_pvm_e3_tsv_e0", "Link_tsv_e0_pvm_e3")
+
+    def setup_all(self):
+        """Build nodes, termination points and links."""
+        self.setup_nodes()
+        self.setup_tps()
+        self.setup_links()
+
+def adjust_xml_file(infile, outfile, begin_marker, end_marker):
+    """Copy infile to outfile, relocating the block of lines delimited
+    by begin_marker..end_marker (inclusive) so that it is emitted after
+    the next max_interesting_line_toread line(s) that follow it; all
+    other lines are copied through unchanged.  Assumes begin_marker
+    appears before end_marker and markers do not nest.  The print()
+    calls are debug traces."""
+    buffer = []
+    in_block = False
+    # Number of lines after the block to emit before flushing the block.
+    max_interesting_line_toread = 1
+    interesting_line = 0
+    with open(infile) as inf:
+        with open(outfile, 'w') as outf:
+            for line in inf:
+                if begin_marker in line:
+                    in_block = True
+                    # Go down
+                if end_marker in line:
+                    assert in_block is True
+                    print("End of gathering line...", line)
+                    buffer.append(line)  # gather lines
+                    interesting_line = max_interesting_line_toread
+                    in_block = False
+                    continue
+                if interesting_line:
+                    print("Interesting line printing ...", line)
+                    outf.write(line)
+                    interesting_line -= 1
+                    if interesting_line == 0:  # output gathered lines
+                        for lbuf in buffer:
+                            outf.write(lbuf)
+                        buffer = []  # empty buffer 
+                        print("\n\n")
+                    continue
+
+                if in_block:
+                    print("Gathering line...", line)
+                    buffer.append(line)  # gather lines
+                else:
+                    outf.write(line)
+
+
+if __name__ == "__main__":
+    model = RwYang.Model.create_libncx()
+    model.load_schema_ypbc(RwTl.get_schema())
+    # create logger 
+    logger = logging.getLogger('VM Network Topology')
+    logger.setLevel(logging.DEBUG)
+    logging.basicConfig(level=logging.DEBUG)
+
+    logger.info('Creating an instance of VM Network Topology')
+
+    nwtop = RwTl.YangData_IetfNetwork()
+
+    # Setup L2 topology
+    l2top = MyL2Topology(nwtop, logger)
+    l2top.setup_all()
+
+    # Setup Provider network topology
+    provtop = MyProvTopology(nwtop, l2top, logger)
+    provtop.setup_all()
+
+    # Setup VM network topology
+    vmtop = MyVMTopology(nwtop, l2top, provtop, logger)
+    vmtop.setup_all()
+
+    print ("Converting to XML")
+    # Convert l2nw network to XML
+    xml_str = nwtop.to_xml_v2(model)
+    tree = etree.XML(xml_str)
+    xml_file = "/tmp/stacked_vmtop.xml"
+    xml_formatted_file = "/tmp/stacked_vmtop2.xml"
+    with open(xml_file, "w") as f:
+        f.write(xml_str)
+    status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True)
+
+    status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True)
+
+    print ("Converting to JSON ")
+    # Convert set of topologies to JSON
+    json_str = nwtop.to_json(model)
+    with open("/tmp/stacked_vmtop.json", "w") as f:
+        f.write(json_str)
+    status = subprocess.call("python -m json.tool /tmp/stacked_vmtop.json > /tmp/stacked_vmtop2.json", shell=True)
+    json_formatted_file = "/tmp/stacked_vmtop2.json"
+    status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True)
+    status = subprocess.call("sed -i -e 's/\"l2t:vlan\"/\"vlan\"/g' " + json_formatted_file, shell=True)
+
diff --git a/rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py b/rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py
new file mode 100644
index 0000000..3ae3e80
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py
@@ -0,0 +1,261 @@
+#!/bin/python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+from gi.repository import RwYang
+from xml.etree import ElementTree as etree
+import subprocess
+import logging
+
+
+class MyL2Network(object):
+    def __init__(self, nwtop, log):
+        self.next_mac = 11
+        self.log = log
+        self.nwtop = nwtop
+        self.l2net1 = nwtop.network.add()
+        self.l2net1.network_id = "L2HostNetwork-1"
+
+        # L2 Network type augmentation
+        self.l2net1.network_types.l2_network = self.l2net1.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.l2net1.l2_network_attributes.name = "Rift LAB SFC-Demo Host Network"
+
+    def get_nw_id(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw.network_id
+
+    def get_nw(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw
+
+    def get_node(self, node_name):
+        _node_id = "urn:Rift:Lab:" + node_name
+        for node in self.l2net1.node:
+            if (node.node_id == _node_id):
+                return node
+
+    def get_tp(self, node, tp_name):
+        _tp_id = node.node_id + "_" + tp_name
+        for tp in node.termination_point :
+            if (tp.tp_id == _tp_id):
+                return tp
+
+    def get_link(self, link_name):
+        for link in nw.link :
+            if (link.l2_link_attributes.name == link_name):
+                return link
+
+    def create_node(self, node_name, mgmt_ip_addr, description):
+        logging.debug("Creating node %s", node_name)
+        node = self.l2net1.node.add()
+        node.node_id = "urn:Rift:Lab:" + node_name
+        # L2 Node augmentation
+        node.l2_node_attributes.name = node_name
+        node.l2_node_attributes.description = description
+        node.l2_node_attributes.management_address.append(mgmt_ip_addr)
+        return node
+
+    def create_tp(self, node, cfg_tp):
+        logging.debug("    Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp)
+        tp = node.termination_point.add()
+        tp.tp_id = ("{}_{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        tp.l2_termination_point_attributes.mac_address = "00:1e:67:d8:48:" + str(self.next_mac)
+        self.next_mac = self.next_mac + 1
+        tp.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
+        return tp
+
+    def create_bidir_link(self, node1, tp1, node2, tp2, link_name1, link_name2):
+        logging.debug("Creating links %s %s", link_name1, link_name2)
+        lnk1= self.l2net1.link.add()
+        lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description)
+        lnk1.source.source_node = node1.node_id
+        lnk1.source.source_tp = tp1.tp_id
+        lnk1.destination.dest_node = node2.node_id
+        lnk1.destination.dest_tp = tp2.tp_id
+        # L2 link augmentation
+        lnk1.l2_link_attributes.name = link_name1
+        #lnk1.l2_link_attributes.rate = 1000000000.00
+
+        lnk2= self.l2net1.link.add()
+        lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description)
+        lnk2.source.source_node = node2.node_id
+        lnk2.source.source_tp = tp2.tp_id
+        lnk2.destination.dest_node = node1.node_id
+        lnk2.destination.dest_tp = tp1.tp_id
+        # L2 link augmentation
+        lnk2.l2_link_attributes.name = link_name2
+        #lnk2.l2_link_attributes.rate = 1000000000.00
+        return lnk1, lnk2
+
+class MyL2Topology(MyL2Network):
+    """Populates the L2 host network with the concrete lab hosts and
+    switches."""
+
+    def __init__(self, nwtop, log):
+        super(MyL2Topology, self).__init__(nwtop, log)
+
+    def find_nw_id(self, nw_name):
+        """Public lookup wrapper around get_nw_id()."""
+        return self.get_nw_id(nw_name)
+
+    def find_nw(self, nw_name):
+        """Public lookup wrapper around get_nw()."""
+        return self.get_nw(nw_name)
+
+    def find_node(self, node_name):
+        """Public lookup wrapper around get_node()."""
+        return self.get_node(node_name)
+
+    def find_tp(self, node, tp_name):
+        """Public lookup wrapper around get_tp()."""
+        return self.get_tp(node, tp_name)
+
+    def find_link(self, link_name):
+        """Public lookup wrapper around get_link()."""
+        return self.get_link(link_name)
+
+    def setup_nodes(self):
+        """Create the lab hosts (Grunt118/44/120) and switches."""
+        self.g118 = self.create_node("Grunt118","10.66.4.118", "Host with OVS and PCI")
+        self.g44 = self.create_node("Grunt44","10.66.4.44", "Host with OVS-DPDK")
+        self.g120 = self.create_node("Grunt120","10.66.4.120", "Host with OVS and PCI")
+        self.hms = self.create_node("HostMgmtSwitch","10.66.4.98", "Switch for host eth0")
+        self.vms = self.create_node("VMMgmtSwitch","10.66.4.55", "Switch for VMs eth0")
+        self.ads = self.create_node("AristaDPSwitch","10.66.4.90", "10 Gbps Switch")
+
+    def setup_tps(self):
+        self.g118_e0 = self.create_tp(self.g118, "eth0")
+        self.g118_e1 = self.create_tp(self.g118, "eth1")
+        self.g118_e2 = self.create_tp(self.g118, "eth2")
+
+        self.g44_e0 = self.create_tp(self.g44, "eth0")
+        self.g44_e1 = self.create_tp(self.g44, "eth1")
+        self.g44_e2 = self.create_tp(self.g44, "eth2")
+        self.g44_e3 = self.create_tp(self.g44, "eth3")
+
+        self.g120_e0 = self.create_tp(self.g120, "eth0")
+        self.g120_e1 = self.create_tp(self.g120, "eth1")
+        self.g120_e2 = self.create_tp(self.g120, "eth2")
+
+        self.hms_e1 = self.create_tp(self.hms, "eth1")
+        self.hms_e2 = self.create_tp(self.hms, "eth2")
+        self.hms_e3 = self.create_tp(self.hms, "eth3")
+
+        self.vms_e1 = self.create_tp(self.vms, "eth1")
+        self.vms_e2 = self.create_tp(self.vms, "eth2")
+        self.vms_e3 = self.create_tp(self.vms, "eth3")
+
+        self.ads_57 = self.create_tp(self.ads, "Card_5:Port_7")
+        self.ads_58 = self.create_tp(self.ads, "Card_8:Port_8")
+        self.ads_47 = self.create_tp(self.ads, "Card_4:Port_7")
+        self.ads_48 = self.create_tp(self.ads, "Card_4:Port_8")
+
+    def setup_links(self):
+        # Add links to l2net1 network
+        # These links are unidirectional and point-to-point
+        # Bidir Links for Grunt118
+        self.create_bidir_link(self.g118, self.g118_e0, self.hms, self.hms_e1, "Link_g118_e0_hms_e1", "Link_hms_e1_g118_e0")
+        self.create_bidir_link(self.g118, self.g118_e1, self.vms, self.vms_e1, "Link_g118_e1_vms_e1", "Link_vms_e1_g118_e1")
+        self.create_bidir_link(self.g118, self.g118_e2, self.ads, self.ads_57, "Link_g118_e2_ads_47", "Link_ads_47_g118_e2")
+        # Bidir Links for Grunt44
+        self.create_bidir_link(self.g44, self.g44_e0, self.hms, self.hms_e2, "Link_g44_e0_hms_e1", "Link_hms_e1_g44_e0")
+        self.create_bidir_link(self.g44, self.g44_e1, self.vms, self.vms_e2, "Link_g44_e1_vms_e1", "Link_vms_e1_g44_e1")
+        self.create_bidir_link(self.g44, self.g44_e2, self.ads, self.ads_47, "Link_g44_e2_ads_47", "Link_ads_47_g44_e2")
+        self.create_bidir_link(self.g44, self.g44_e3, self.ads, self.ads_48, "Link_g44_e3_ads_48", "Link_ads_48_g44_e3")
+        # Bidir Links for Grunt120
+        self.create_bidir_link(self.g120, self.g120_e0, self.hms, self.hms_e3, "Link_g120_e0_hms_e1", "Link_hms_e1_g120_e0")
+        self.create_bidir_link(self.g120, self.g120_e1, self.vms, self.vms_e3, "Link_g120_e1_vms_e1", "Link_vms_e1_g120_e1")
+        self.create_bidir_link(self.g120, self.g120_e2, self.ads, self.ads_58, "Link_g120_e2_ads_58", "Link_ads_58_g120_e2")
+
+    def setup_all(self):
+        self.setup_nodes()
+        self.setup_tps()
+        self.setup_links()
+
+def adjust_xml_file(infile, outfile, begin_marker, end_marker):
+    buffer = []
+    in_block = False
+    max_interesting_line_toread = 1
+    interesting_line = 0
+    with open(infile) as inf:
+        with open(outfile, 'w') as outf:
+            for line in inf:
+                if begin_marker in line:
+                    in_block = True
+                    # Go down
+                if end_marker in line:
+                    assert in_block is True
+                    print("End of gathering line...", line)
+                    buffer.append(line)  # gather lines
+                    interesting_line = max_interesting_line_toread
+                    in_block = False
+                    continue
+                if interesting_line:
+                    print("Interesting line printing ...", line)
+                    outf.write(line)
+                    interesting_line -= 1
+                    if interesting_line == 0:  # output gathered lines
+                        for lbuf in buffer:
+                            outf.write(lbuf)
+                        buffer = []  # empty buffer 
+                        print("\n\n")
+                    continue
+
+                if in_block:
+                    print("Gathering line...", line)
+                    buffer.append(line)  # gather lines
+                else:
+                    outf.write(line)
+
+if __name__ == "__main__":
+    model = RwYang.Model.create_libncx()
+    model.load_schema_ypbc(RwTl.get_schema())
+    # create logger 
+    logger = logging.getLogger(__file__)
+    logger.setLevel(logging.DEBUG)
+    logging.basicConfig(level=logging.DEBUG)
+
+    logging.info('Creating an instance of L2 Host Topology')
+    nwtop = RwTl.YangData_IetfNetwork()
+
+    l2top = MyL2Topology(nwtop, logger)
+    l2top.setup_all()
+
+    logging.info ("Converting to XML")
+    # Convert l2nw network to XML
+    xml_str = nwtop.to_xml_v2(model)
+    tree = etree.XML(xml_str)
+    xml_file = "/tmp/stacked_top.xml"
+    xml_formatted_file = "/tmp/stacked_top2.xml"
+    with open(xml_file, "w") as f:
+        f.write(xml_str)
+    status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True)
+
+    logging.info ("Converting to JSON")
+    # Convert set of topologies to JSON
+    json_str = nwtop.to_json(model)
+    with open("/tmp/stacked_top.json", "w") as f:
+        f.write(json_str)
+    status = subprocess.call("python -m json.tool /tmp/stacked_top.json > /tmp/stacked_top2.json", shell=True)
+    json_formatted_file = "/tmp/stacked_top2.json"
+    status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True)
+
diff --git a/rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py b/rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py
new file mode 100644
index 0000000..45e2e80
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py
@@ -0,0 +1,101 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import datetime
+import logging
+import unittest
+
+import rw_peas
+import rwlogger
+
+from gi.repository import RwsdnYang
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import RwcalYang
+from gi.repository import IetfNetworkYang
+from gi.repository.RwTypes import RwStatus
+
+
+logger = logging.getLogger('mock')
+
+def get_sdn_account():
+    """
+    Creates an object for class RwsdnYang.SdnAccount()
+    """
+    account                 = RwsdnYang.SDNAccount()
+    account.account_type    = "mock"
+    account.mock.username   = "rift"
+    account.mock.plugin_name = "rwsdn_mock"
+    return account
+
+def get_sdn_plugin():
+    """
+    Loads rw.sdn plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwsdn_mock', 'RwSdn-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("SDN-Log")
+
+    sdn = plugin.get_interface("Topology")
+    try:
+        rc = sdn.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except:
+        logger.error("ERROR:SDN plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("Mock SDN plugin successfully instantiated")
+    return sdn
+
+
+
+class SdnMockTest(unittest.TestCase):
+    def setUp(self):
+        """
+          Initialize test plugins
+        """
+        self._acct = get_sdn_account()
+        logger.info("Mock-SDN-Test: setUp")
+        self.sdn   = get_sdn_plugin()
+        logger.info("Mock-SDN-Test: setUpEND")
+
+    def tearDown(self):
+        logger.info("Mock-SDN-Test: Done with tests")
+
+    def test_get_network_list(self):
+        """
+           First test case
+        """
+        rc, nwtop = self.sdn.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS) 
+        logger.debug("SDN-Mock-Test: Retrieved network attributes ")
+        for nw in nwtop.network:
+           logger.debug("...Network id %s", nw.network_id)
+           logger.debug("...Network name %s", nw.l2_network_attributes.name)
+           print(nw)
+
+
+
+if __name__ == "__main__":
+    # Verbose logging makes plugin-load failures visible in the test output.
+    logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
+
+
+
+
diff --git a/rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py b/rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py
new file mode 100644
index 0000000..d216f0d
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py
@@ -0,0 +1,99 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import datetime
+import logging
+import unittest
+
+import rw_peas
+import rwlogger
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import RwsdnYang
+from gi.repository import IetfNetworkYang
+from gi.repository.RwTypes import RwStatus
+from gi.repository import RwSdn
+
+
+logger = logging.getLogger('sdnsim')
+
+def get_sdn_account():
+    """
+    Creates an object for class RwsdnYang.SdnAccount()
+    """
+    account                 = RwsdnYang.SDNAccount()
+    account.account_type    = "sdnsim"
+    account.sdnsim.username   = "rift"
+    account.sdnsim.plugin_name = "rwsdn_sim"
+    return account
+
+def get_sdn_plugin():
+    """
+    Loads rw.sdn plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwsdn_sim', 'RwSdn-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("SDN-Log")
+
+    sdn = plugin.get_interface("Topology")
+    try:
+        rc = sdn.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except:
+        logger.error("ERROR:SDN sim plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("SDN sim plugin successfully instantiated")
+    return sdn
+
+
+
+class SdnSimTest(unittest.TestCase):
+    def setUp(self):
+        """
+          Initialize test plugins
+        """
+        self._acct = get_sdn_account()
+        logger.info("SDN-Sim-Test: setUp")
+        self.sdn   = get_sdn_plugin()
+        logger.info("SDN-Sim-Test: setUpEND")
+
+    def tearDown(self):
+        logger.info("SDN-Sim-Test: Done with tests")
+
+    def test_get_network_list(self):
+        """
+           First test case
+        """
+        rc, nwtop = self.sdn.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS) 
+        logger.debug("SDN-Sim-Test: Retrieved network attributes ")
+        for nw in nwtop.network:
+           logger.debug("...Network id %s", nw.network_id)
+           logger.debug("...Network name %s", nw.l2_network_attributes.name)
+
+
+if __name__ == "__main__":
+    # Verbose logging makes plugin-load failures visible in the test output.
+    logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
+
+
+
+
diff --git a/rwlaunchpad/plugins/rwvns/test/test_top_datastore.py b/rwlaunchpad/plugins/rwvns/test/test_top_datastore.py
new file mode 100644
index 0000000..d6c1313
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/test/test_top_datastore.py
@@ -0,0 +1,734 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import datetime
+import logging
+import unittest
+
+import rwlogger
+
+# from gi.repository import IetfNetworkYang
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+# from gi.repository.RwTypes import RwStatus
+
+from create_stackedl2topology import MyL2Topology
+
+from rift.topmgr import (
+    NwtopDataStore,
+)
+logger = logging.getLogger('sdntop')
+
+# Expected sizes of the "L2HostNetwork-1" topology built by MyL2Topology
+# (see create_stackedl2topology.py): 6 nodes carrying 20 TPs in total.
+NUM_NWS = 1
+NUM_NODES_L2_NW = 6
+NUM_TPS_L2_NW = 20
+NUM_LINKS = 20
+
+class SdnTopStoreNetworkTest(unittest.TestCase):
+    # Network-level create/update behavior of NwtopDataStore.
+    def setUp(self):
+        """
+          Build the L2 topology, seed the data store with its network, and
+          prepare a second (empty) network object for the add/update tests.
+        """
+        self._nwtopdata_store = NwtopDataStore(logger)
+        self.test_nwtop = RwTl.YangData_IetfNetwork()
+
+        self.l2top = MyL2Topology(self.test_nwtop, logger)
+        self.l2top.setup_all()
+
+        # Get initial test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        # Create initial nw
+        self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1)
+
+        # Add test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        assert self.l2net1 is not None
+        self.new_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.new_l2net.network_id = "L2HostNetwork-2"
+        logger.info("SdnTopStoreNetworkTest: setUp")
+
+    def tearDown(self):
+        self.l2net1 = None
+        self.new_l2net = None
+        logger.info("SdnTopStoreNetworkTest: Done with tests")
+
+    def test_create_network(self):
+        """
+           Test: Create first l2 network
+           Verifies the network seeded in setUp is retrievable and has the
+           expected node and termination-point counts.
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreNetworkTest: Create network ")
+        # Get test data
+        # Created during setup phase
+        assert self.l2net1 is not None
+        # Use data store APIs
+        # Network already stored
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        logger.debug("...Network id %s", nw.network_id)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        logger.debug("...Network name %s", nw.l2_network_attributes.name)
+        for node in nw.node:
+            logger.debug("...Node id %s", node.node_id)
+            num_nodes += 1
+            for tp in node.termination_point:
+                logger.debug("...Tp id %s", tp.tp_id)
+                num_tps += 1
+        self.assertEqual(num_nodes, NUM_NODES_L2_NW)
+        self.assertEqual(num_tps, NUM_TPS_L2_NW)
+
+
+    def test_add_network(self):
+        """
+           Test: Add another network, Check network id
+        """
+        logger.debug("SdnTopStoreNetworkTest: Add network ")
+        # Use data store APIs
+        self._nwtopdata_store.create_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-2")
+        self.assertEqual(len(self._nwtopdata_store._networks), 2)
+
+    def test_add_networktype(self):
+        """
+           Test: Add another network, Check network type
+        """
+        logger.debug("SdnTopStoreTest: Add network type ")
+        # Use data store APIs
+        self._nwtopdata_store.create_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-2")
+        self.assertEqual(len(self._nwtopdata_store._networks), 2)
+        # Add new test data: mark the network as an L2 network, then push
+        # the change through update_network and confirm it sticks.
+        self.new_l2net.network_types.l2_network = self.new_l2net.network_types.l2_network.new()
+        logger.debug("Adding update l2net..%s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertIsNotNone(nw.network_types.l2_network)
+
+    def test_add_networkl2name(self):
+        """
+           Test: Add another network, Check L2 network name
+        """
+        logger.debug("SdnTopStoreTest: Add L2 network name ")
+        # Use data store APIs
+        self.new_l2net.network_types.l2_network = self.new_l2net.network_types.l2_network.new()
+        self._nwtopdata_store.create_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-2")
+        self.assertEqual(len(self._nwtopdata_store._networks), 2)
+        # Add new test data: set the L2 display name and push the update.
+        self.new_l2net.l2_network_attributes.name = "L2networkName"
+        logger.debug("Adding update l2net..%s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertEqual(nw.l2_network_attributes.name, "L2networkName")
+
+
+class SdnTopStoreNetworkNodeTest(unittest.TestCase):
+    # Node-level update behavior of NwtopDataStore.
+    def setUp(self):
+        """
+          Seed the data store with the base topology and prepare an update
+          network carrying one extra node ("TempNode2").
+        """
+        self._nwtopdata_store = NwtopDataStore(logger)
+        self.test_nwtop = RwTl.YangData_IetfNetwork()
+
+        self.l2top = MyL2Topology(self.test_nwtop, logger)
+        self.l2top.setup_all()
+
+        # Get initial test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        # Create initial nw
+        self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1)
+        # Get test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        assert self.l2net1 is not None
+        self.new_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.new_l2net.network_id = "L2HostNetwork-1"
+        self.node2 = self.new_l2net.node.add()
+        self.node2.node_id = "TempNode2"
+        logger.info("SdnTopStoreTest: setUp NetworkNodetest")
+
+    def tearDown(self):
+        logger.info("SdnTopStoreTest: Done with  NetworkNodetest")
+
+
+    def test_add_network_node(self):
+        """
+           Test: Add a node to existing network
+                 Test all parameters
+        """
+        # num_nodes/num_tps are unused here; kept for symmetry with the
+        # sibling tests.
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Add network node")
+        # Add test data
+        self.node2.node_id = "TempNode2"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created: the extra node is appended after the original six.
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+
+    #@unittest.skip("Skipping")
+    def test_update_network_node(self):
+        """
+           Test: Update a node in the existing network and verify its
+           L2 attributes are stored.
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network node")
+        # Add test data
+        self.node2.node_id = "TempNode2"
+        self.node2.l2_node_attributes.description = "TempNode2 desc"
+        self.node2.l2_node_attributes.name = "Nice Name2"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.description, "TempNode2 desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name2")
+
+    #@unittest.skip("Skipping")
+    def test_update_network_node_l2attr1(self):
+        """
+           Test: Update a node in the existing network, then change a
+           scalar L2 attribute (name) and verify the second update wins.
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network node")
+        # Add test data
+        self.node2.node_id = "TempNode2"
+        self.node2.l2_node_attributes.description = "TempNode2 desc"
+        self.node2.l2_node_attributes.name = "Nice Name3"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.description, "TempNode2 desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3")
+
+        # Add test data: change the name and update again.
+        self.node2.l2_node_attributes.name = "Nice Name4"
+        logger.debug("Network %s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        logger.debug("Node %s", nw.node[NUM_NODES_L2_NW])
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name4")
+
+    def test_update_network_node_l2attr2(self):
+        """
+           Test: Update a node in the existing network, then grow a leaf-list
+           L2 attribute (management_address) across successive updates.
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network node")
+        # Add test data
+        self.node2.node_id = "TempNode2"
+        self.node2.l2_node_attributes.description = "TempNode2 desc"
+        self.node2.l2_node_attributes.name = "Nice Name3"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.description, "TempNode2 desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3")
+
+        # Add test data: first management address.
+        self.node2.l2_node_attributes.management_address.append("10.0.0.1")
+        logger.debug("Network %s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].l2_node_attributes.management_address), 1)
+
+        # Add test data: second management address should accumulate, not replace.
+        self.node2.l2_node_attributes.management_address.append("10.0.0.2")
+        logger.debug("Network %s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].l2_node_attributes.management_address), 2)
+
+
+class SdnTopStoreNetworkNodeTpTest(unittest.TestCase):
+    def setUp(self):
+        """
+          Seed the data store with the base topology and prepare an update
+          network carrying one extra node ("TempNode2") that owns one
+          termination point ("TempTp1").
+        """
+        self._nwtopdata_store = NwtopDataStore(logger)
+        self.test_nwtop = RwTl.YangData_IetfNetwork()
+
+        self.l2top = MyL2Topology(self.test_nwtop, logger)
+        self.l2top.setup_all()
+
+        # Get initial test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        # Create initial nw
+        self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1)
+        # Get test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        assert self.l2net1 is not None
+        self.new_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.new_l2net.network_id = "L2HostNetwork-1"
+        self.node2 = self.new_l2net.node.add()
+        self.node2.node_id = "TempNode2"
+        self.tp1 = self.node2.termination_point.add()
+        self.tp1.tp_id = "TempTp1"
+        logger.info("SdnTopStoreTest: setUp NetworkNodeTptest")
+
+    def tearDown(self):
+        logger.info("SdnTopStoreTest: Done with  NetworkNodeTptest")
+        
+        self.new_l2net = None
+        self.node2 = None
+        self.tp1 = None
+
+    def test_add_network_node_tp(self):
+        """
+           Test: Add a node (with one TP) to the existing network and
+           verify both the node and its termination point are stored.
+        """
+        # num_nodes/num_tps are unused here; kept for symmetry with the
+        # sibling tests.
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network ")
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+
+    def test_update_network_node_tp(self):
+        """
+           Test: Update a tp to existing network, add all tp elements
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network ")
+        # Populate every L2 TP attribute before pushing the update.
+        self.tp1.tp_id = "TempTp1"
+        self.tp1.l2_termination_point_attributes.description = "TempTp1 Desc"
+        self.tp1.l2_termination_point_attributes.maximum_frame_size = 1296
+        self.tp1.l2_termination_point_attributes.mac_address = "00:1e:67:98:28:01"
+        self.tp1.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1296)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:01")
+
+    def test_update_network_node_tp2(self):
+        """
+           Test: Update a tp to existing network, change tp elements
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network ")
+        self.tp1.tp_id = "TempTp1"
+        self.tp1.l2_termination_point_attributes.description = "TempTp1 Desc"
+        self.tp1.l2_termination_point_attributes.maximum_frame_size = 1296
+        self.tp1.l2_termination_point_attributes.mac_address = "00:1e:67:98:28:01"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1296)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:01")
+
+        # Change frame size
+        self.tp1.l2_termination_point_attributes.maximum_frame_size = 1396
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1396)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:01")
+
+        # Change MAC address
+        self.tp1.l2_termination_point_attributes.mac_address = "00:1e:67:98:28:02"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1396)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:02")
+
+        # Add encapsulation type
+        self.tp1.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1396)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:02")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.eth_encapsulation, "l2t:ethernet")
+
+    def test_update_extra_network_node_tp2(self):
+        """
+           Test: Add a second termination point (TempTp2) to the extra node and verify the stored network reflects it
+        """
+        num_nodes = 0  # NOTE(review): unused local
+        num_tps = 0  # NOTE(review): unused local
+        logger.debug("SdnTopStoreTest: Update network ")
+        self.tp2 = self.node2.termination_point.add()
+        self.tp2.tp_id = "TempTp2"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify the new tp was merged into the stored network
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[1].tp_id, "TempTp2")
+
+
+
+class SdnTopStoreNetworkLinkTest(unittest.TestCase):
+    def setUp(self):
+        """
+          Seed the data store with L2HostNetwork-1 and build a 2-node delta network (TempNode1/TempTp1, TempNode2/TempTp2) used by the link tests
+        """
+        self._nwtopdata_store = NwtopDataStore(logger)
+        self.test_nwtop = RwTl.YangData_IetfNetwork()
+
+        self.l2top = MyL2Topology(self.test_nwtop, logger)
+        self.l2top.setup_all()
+
+        # Get initial test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        # Create initial nw
+        self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1)
+        # Get test data  # NOTE(review): duplicates the lookup above
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        assert self.l2net1 is not None
+        self.new_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.new_l2net.network_id = "L2HostNetwork-1"
+
+        self.src_node = self.new_l2net.node.add()
+        self.src_node.node_id = "TempNode1"
+        self.tp1 = self.src_node.termination_point.add()
+        self.tp1.tp_id = "TempTp1"
+
+        self.dest_node = self.new_l2net.node.add()
+        self.dest_node.node_id = "TempNode2"
+        self.tp2 = self.dest_node.termination_point.add()
+        self.tp2.tp_id = "TempTp2"
+        logger.info("SdnTopStoreTest: setUp NetworkLinkTest")
+
+    def tearDown(self):
+        logger.info("SdnTopStoreTest: Done with  NetworkLinkTest")
+        
+        self.new_l2net = None
+        self.src_node = None
+        self.tp1 = None
+        self.dest_node = None
+        self.tp2 = None
+
+    def test_add_network_link(self):
+        """
+           Test: Merge the temp nodes first, then add Link1 (TempTp1 -> TempTp2) and verify it is appended
+        """
+        logger.info("SdnTopStoreTest: Update network link")
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify the nodes were merged; no link has been added yet
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2")
+        self.assertEqual(len(nw.link), NUM_LINKS )  # still only the baseline links
+        self.link1 = self.new_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.source.source_node = self.src_node.node_id
+        self.link1.source.source_tp = self.tp1.tp_id
+        self.link1.destination.dest_node = self.dest_node.node_id
+        self.link1.destination.dest_tp = self.tp2.tp_id
+        # Use data store APIs
+        logger.info("SdnTopStoreTest: Update network link - Part 2")
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        # Verify Link1 was appended after the baseline links
+        self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1")
+        self.assertEqual(nw.link[NUM_LINKS].source.source_node, self.src_node.node_id)
+        self.assertEqual(nw.link[NUM_LINKS].source.source_tp, self.tp1.tp_id)
+        self.assertEqual(nw.link[NUM_LINKS].destination.dest_node, self.dest_node.node_id)
+        self.assertEqual(nw.link[NUM_LINKS].destination.dest_tp, self.tp2.tp_id)
+        self.assertEqual(len(nw.link), NUM_LINKS + 1)
+
+    def test_add_extra_network_link(self):
+        """
+           Test: Add a second, reverse-direction link (Link2) to a network that already has Link1
+        """
+        logger.info("SdnTopStoreTest: Update extra network link")
+        # Create initial state: forward link Link1
+        self.link1 = self.new_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.source.source_node = self.src_node.node_id
+        self.link1.source.source_tp = self.tp1.tp_id
+        self.link1.destination.dest_node = self.dest_node.node_id
+        self.link1.destination.dest_tp = self.tp2.tp_id
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify initial state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2")
+        self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1")
+        self.assertEqual(len(nw.link), NUM_LINKS  + 1)
+
+        # Add extra link (reverse)
+        self.link2 = self.new_l2net.link.add()
+        self.link2.link_id = "Link2"
+        self.link2.source.source_node = self.dest_node.node_id
+        self.link2.source.source_tp = self.tp2.tp_id
+        self.link2.destination.dest_node = self.src_node.node_id
+        self.link2.destination.dest_tp = self.tp1.tp_id
+        # Use data store APIs
+        logger.info("SdnTopStoreTest: Update extra network link - Part 2")
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        # Verify Link2 was appended after Link1
+        self.assertEqual(nw.link[NUM_LINKS+1].link_id, "Link2")
+        self.assertEqual(len(nw.link), NUM_LINKS + 2)
+        self.assertEqual(nw.link[NUM_LINKS+1].source.source_node, self.dest_node.node_id)
+        self.assertEqual(nw.link[NUM_LINKS+1].source.source_tp, self.tp2.tp_id)
+        self.assertEqual(nw.link[NUM_LINKS+1].destination.dest_node, self.src_node.node_id)
+        self.assertEqual(nw.link[NUM_LINKS+1].destination.dest_tp, self.tp1.tp_id)
+
+    def test_add_network_link_l2attr(self):
+        """
+           Test: Check L2 link attributes (name, rate) survive an update_network merge
+        """
+        logger.info("SdnTopStoreTest: Add network link L2 attributes")
+        # Create test state
+        self.link1 = self.new_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.source.source_node = self.src_node.node_id
+        self.link1.source.source_tp = self.tp1.tp_id
+        self.link1.destination.dest_node = self.dest_node.node_id
+        self.link1.destination.dest_tp = self.tp2.tp_id
+        self.link1.l2_link_attributes.name = "Link L2 name"
+        self.link1.l2_link_attributes.rate = 10000
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2")
+        self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1")
+        self.assertEqual(len(nw.link), NUM_LINKS  + 1)
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 name")
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.rate, 10000)
+
+    def test_change_network_link_l2attr(self):
+        """
+           Test: Change an L2 link attribute via a partial network containing only the delta
+        """
+        logger.info("SdnTopStoreTest: Change network link L2 attributes")
+        # Create initial state
+        self.link1 = self.new_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.source.source_node = self.src_node.node_id
+        self.link1.source.source_tp = self.tp1.tp_id
+        self.link1.destination.dest_node = self.dest_node.node_id
+        self.link1.destination.dest_tp = self.tp2.tp_id
+        self.link1.l2_link_attributes.name = "Link L2 name"
+        self.link1.l2_link_attributes.rate = 10000
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify initial state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2")
+        self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1")
+        self.assertEqual(len(nw.link), NUM_LINKS  + 1)
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 name")
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.rate, 10000)
+
+        # Create test state: partial network carrying only the changed attribute
+        self.test_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.test_l2net.network_id = "L2HostNetwork-1"
+        self.link1 = self.test_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.l2_link_attributes.name = "Link L2 updated name"
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.test_l2net)
+        # Verify test state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 updated name")
+
+    def test_change_network_link_dest_tp(self):
+        """
+           Test: Change the destination node/tp of an existing link via a partial update
+        """
+        logger.info("SdnTopStoreTest: Change network link dest-tp")
+        # Create initial state
+        self.link1 = self.new_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.source.source_node = self.src_node.node_id
+        self.link1.source.source_tp = self.tp1.tp_id
+        self.link1.destination.dest_node = self.dest_node.node_id
+        self.link1.destination.dest_tp = self.tp2.tp_id
+        self.link1.l2_link_attributes.name = "Link L2 name"
+        self.link1.l2_link_attributes.rate = 10000
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify initial state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2")
+        self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1")
+        self.assertEqual(len(nw.link), NUM_LINKS  + 1)
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 name")
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.rate, 10000)
+
+        # Create test state
+        self.test_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.test_l2net.network_id = "L2HostNetwork-1"
+        self.link1 = self.test_l2net.link.add()
+        self.link1.link_id = "Link1"
+        # Changing dest node params
+        self.link1.destination.dest_node = self.src_node.node_id
+        self.link1.destination.dest_tp = self.tp1.tp_id
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.test_l2net)
+        # Verify test state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(nw.link[NUM_LINKS].destination.dest_node,  self.src_node.node_id)
+
+
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+    unittest.main()
+
+
+
+
diff --git a/rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py b/rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py
new file mode 100755
index 0000000..44e2f5c
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import logging
+import os
+import sys
+import types
+import unittest
+import uuid
+import random
+
+import xmlrunner
+
+import gi
+gi.require_version('CF', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwMain', '1.0')
+gi.require_version('RwManifestYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+import gi.repository.CF as cf
+import gi.repository.RwDts as rwdts
+import gi.repository.RwMain as rwmain
+import gi.repository.RwManifestYang as rwmanifest
+import gi.repository.IetfL2TopologyYang as l2Tl
+import gi.repository.RwTopologyYang as RwTl
+import gi.repository.RwLaunchpadYang as launchpadyang
+from gi.repository import RwsdnYang
+from gi.repository.RwTypes import RwStatus
+
+from create_stackedl2topology import MyL2Topology
+from create_stackedProvNettopology import MyProvTopology
+from create_stackedVMNettopology import MyVMTopology
+from create_stackedSfctopology import MySfcTopology
+
+import rw_peas
+import rift.tasklets
+import rift.test.dts
+
+if sys.version_info < (3, 4, 4):  # asyncio.ensure_future appeared in 3.4.4; alias the old name
+    asyncio.ensure_future = asyncio.async
+
+
+class TopMgrTestCase(rift.test.dts.AbstractDTSTest):
+
+    @classmethod
+    def configure_suite(cls, rwmain):  # NOTE(review): 'rwmain' param shadows the rwmain module import and is unused; cls.rwmain is used instead — confirm intent
+        vns_mgr_dir = os.environ.get('VNS_MGR_DIR')  # plugin directory set up in main()
+
+        cls.rwmain.add_tasklet(vns_mgr_dir, 'rwvnstasklet')
+
+    @classmethod
+    def configure_schema(cls):  # schema used by the DTS test harness
+        return RwTl.get_schema()
+        
+    @asyncio.coroutine
+    def wait_tasklets(self):  # give tasklets a moment to come up
+        yield from asyncio.sleep(1, loop=self.loop)
+
+    @classmethod
+    def configure_timeout(cls):
+        return 360  # seconds; run_test below sleeps 120s before configuring
+
+
+    @asyncio.coroutine
+    def configure_l2_network(self, dts):  # build the static L2 topology and push it via a DTS create
+        nwtop = RwTl.YangData_IetfNetwork()
+        l2top = MyL2Topology(nwtop, self.log)
+        l2top.setup_all()
+        nw_xpath = "C,/nd:network"  # "C," prefix marks a config-category xpath — TODO confirm
+        self.log.info("Configuring l2 network: %s",nwtop)
+        yield from dts.query_create(nw_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    nwtop)
+
+    @asyncio.coroutine
+    def configure_prov_network(self, dts):  # provider topology is stacked on top of the L2 topology
+        nwtop = RwTl.YangData_IetfNetwork()
+        l2top = MyL2Topology(nwtop, self.log)
+        l2top.setup_all()
+
+        provtop = MyProvTopology(nwtop, l2top, self.log)
+        provtop.setup_all()
+        nw_xpath = "C,/nd:network"
+        self.log.info("Configuring provider network: %s",nwtop)
+        yield from dts.query_create(nw_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    nwtop)
+
+    @asyncio.coroutine
+    def configure_vm_network(self, dts):  # VM topology stacked on L2 + provider topologies
+        nwtop = RwTl.YangData_IetfNetwork()
+        l2top = MyL2Topology(nwtop, self.log)
+        l2top.setup_all()
+
+        provtop = MyProvTopology(nwtop, l2top, self.log)
+        provtop.setup_all()
+
+        vmtop = MyVMTopology(nwtop, l2top, provtop, self.log)
+        vmtop.setup_all()
+        nw_xpath = "C,/nd:network"
+        self.log.info("Configuring VM network: %s",nwtop)
+        yield from dts.query_create(nw_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    nwtop)
+
+    @asyncio.coroutine
+    def configure_sfc_network(self, dts):  # SFC topology stacked on L2 + provider + VM topologies
+        nwtop = RwTl.YangData_IetfNetwork()
+        l2top = MyL2Topology(nwtop, self.log)
+        l2top.setup_all()
+
+        provtop = MyProvTopology(nwtop, l2top, self.log)
+        provtop.setup_all()
+
+        vmtop = MyVMTopology(nwtop, l2top, provtop, self.log)
+        vmtop.setup_all()
+
+        sfctop = MySfcTopology(nwtop, l2top, provtop, vmtop, self.log)
+        sfctop.setup_all()
+
+        nw_xpath = "C,/nd:network"
+        self.log.info("Configuring SFC network: %s",nwtop)
+        yield from dts.query_create(nw_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    nwtop)
+
+
+    #@unittest.skip("Skipping test_network_config")                            
+    def test_network_config(self):  # end-to-end: push all four stacked topologies through DTS
+        self.log.debug("STARTING - test_network_config")
+        tinfo = self.new_tinfo('static_network')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+        @asyncio.coroutine
+        def run_test():
+            networks = []  # NOTE(review): unused local
+            computes = []  # NOTE(review): unused local
+
+            yield from asyncio.sleep(120, loop=self.loop)  # fixed wait for tasklets to start — TODO: replace with a readiness check
+            yield from self.configure_l2_network(dts)
+            yield from self.configure_prov_network(dts)
+            yield from self.configure_vm_network(dts)
+            yield from self.configure_sfc_network(dts)
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_network_config")
+
+def main():  # default plugin directories, then run the suite under the XML test runner
+    plugin_dir = os.path.join(os.environ["RIFT_INSTALL"], "usr/lib/rift/plugins")
+
+    if 'VNS_MGR_DIR' not in os.environ:  # only default when not set by the caller
+        os.environ['VNS_MGR_DIR'] = os.path.join(plugin_dir, 'rwvns')
+
+    if 'MESSAGE_BROKER_DIR' not in os.environ:
+        os.environ['MESSAGE_BROKER_DIR'] = os.path.join(plugin_dir, 'rwmsgbroker-c')
+
+    if 'ROUTER_DIR' not in os.environ:
+        os.environ['ROUTER_DIR'] = os.path.join(plugin_dir, 'rwdtsrouter-c')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])  # XML results for CI
+    unittest.main(testRunner=runner)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt
new file mode 100644
index 0000000..895ee85
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt
@@ -0,0 +1,73 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Creation Date: 10/28/2015
+# 
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwsdn)  # base name of the Vala plugin interface (RwSdn GI namespace)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwSdn-${VALA_VERSION})
+
+rift_add_vala(  # compile rwsdn.vala into .so/.gir/.typelib/.vapi artifacts
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES
+    rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0
+    rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rwsdn_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
+    ietf_network_yang-1.0 ietf_network_topology_yang-1.0
+    ietf_l2_topology_yang-1.0 rw_topology_yang-1.0
+    rw_log-1.0
+  VAPI_DIRS 
+    ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
+    ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+    ${RIFT_SUBMODULE_BINARY_ROOT}/rwlaunchpad/plugins/rwvns/yang/
+  GIR_PATHS 
+    ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
+    ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+    ${RIFT_SUBMODULE_BINARY_ROOT}/rwlaunchpad/plugins/rwvns/yang/
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  DEPENDS rwcal_yang rwsdn_yang mano_yang rwlog_gi rwschema_yang
+  )
+
+rift_install_vala_artifacts(  # COMPONENT/PKG_LONG_NAME comes from the parent rwlaunchpad CMakeLists
+  HEADER_FILES ${VALA_NAME}.h
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+  )
+
+
+set(subdirs  # SDN provider plugin implementations
+  rwsdn_mock
+  rwsdn_sim
+  rwsdn_odl 
+  rwsdn-python
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt
new file mode 100644
index 0000000..f8d8a71
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)  # provides rift_install_python_plugin()
+
+rift_install_python_plugin(rwsdn-plugin rwsdn-plugin.py)  # install the Python SDN plugin shim
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py b/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py
new file mode 100644
index 0000000..374147d
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py
@@ -0,0 +1,95 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import (
+    GObject,
+    RwSdn, # Vala package
+    RwTypes)
+
+import rw_status
+import rwlogger
+
+import rift.cal
+import rift.sdn
+
+logger = logging.getLogger('rwsdn')
+
+rwstatus = rw_status.rwstatus_from_exc_map({  # decorator: translate raised exceptions into RwStatus return codes
+                IndexError: RwTypes.RwStatus.NOTFOUND,
+                KeyError: RwTypes.RwStatus.NOTFOUND,
+
+           })
+
+
+class TopologyPlugin(GObject.Object, RwSdn.Topology):
+    def __init__(self):  # NOTE(review): body uses 2-space indent, inconsistent with the rest of the file
+      GObject.Object.__init__(self)
+      self._impl = None  # provider-name -> implementation map, filled in do_init()
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        # Instantiate every known SDN provider; a failure is logged, not fatal.
+        providers = {
+            "sdnsim": rift.sdn.SdnSim,
+            "mock": rift.sdn.Mock,
+                }
+
+        logger.addHandler(
+            rwlogger.RwLogger(
+                subcategory="rwsdn",
+                log_hdl=rwlog_ctx,
+            )
+        )
+
+        self._impl = {}
+        for name, impl in providers.items():
+            try:
+                self._impl[name] = impl()
+
+            except Exception:
+                msg = "unable to load SDN implementation for {}"
+                logger.exception(msg.format(name))
+
+    @rwstatus
+    def do_get_network_list(self, account, network_top):
+        # Dispatch by account_type; a missing provider raises KeyError, which the decorator maps to NOTFOUND.
+        obj = self._impl[account.account_type]
+        return obj.get_network_list(account, network_top)
+
+def main():  # smoke-test of the rwstatus decorator (exception -> status mapping, docstring preservation)
+    @rwstatus
+    def blah():
+        raise IndexError()
+
+    a = blah()
+    assert(a == RwTypes.RwStatus.NOTFOUND)  # default map turns IndexError into NOTFOUND
+
+    @rwstatus({IndexError: RwTypes.RwStatus.NOTCONNECTED})
+    def blah2():
+        """Some function"""
+        raise IndexError()
+
+    a = blah2()
+    assert(a == RwTypes.RwStatus.NOTCONNECTED)  # explicit map overrides the default
+    assert(blah2.__doc__ == "Some function")  # decorator must preserve __doc__
+
+if __name__ == '__main__':
+    main()
+
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn.vala b/rwlaunchpad/plugins/rwvns/vala/rwsdn.vala
new file mode 100644
index 0000000..ec4ab31
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/vala/rwsdn.vala
@@ -0,0 +1,86 @@
+namespace RwSdn {  // GObject interface implemented by each SDN provider plugin
+
+  public interface Topology: GLib.Object {
+    /*
+     * Init routine
+     */
+    public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx);
+
+    /*
+     * Credential Validation related APIs
+     */
+    public abstract RwTypes.RwStatus validate_sdn_creds(
+      Rwsdn.SDNAccount account,
+      out Rwsdn.SdnConnectionStatus status);
+
+    /*
+     * Configuring  related APIs
+     */
+    /* TODO */
+
+    /*
+     * Network related APIs
+     */
+    public abstract RwTypes.RwStatus get_network_list(
+      Rwsdn.SDNAccount account,
+      out RwTopology.YangData_IetfNetwork network_topology);
+   
+    /*
+     * VNFFG Chain related APIs
+     */
+    public abstract RwTypes.RwStatus create_vnffg_chain(
+      Rwsdn.SDNAccount account,
+      Rwsdn.VNFFGChain vnffg_chain,
+      out string vnffg_id);
+
+    /*
+     * VNFFG Chain Terminate related APIs
+     */
+    public abstract RwTypes.RwStatus terminate_vnffg_chain(
+      Rwsdn.SDNAccount account,
+      string vnffg_id);
+
+
+    /*
+     * VNFFG rendered-path related APIs
+     */
+    public abstract RwTypes.RwStatus get_vnffg_rendered_paths(
+      Rwsdn.SDNAccount account,
+      out Rwsdn.VNFFGRenderedPaths rendered_paths);
+
+    /*
+     * Classifier creation related APIs
+     */
+    public abstract RwTypes.RwStatus create_vnffg_classifier(
+      Rwsdn.SDNAccount account,
+      Rwsdn.VNFFGClassifier vnffg_classifier, 
+      out string vnffg_classifier_id);
+
+    /*
+     * Classifier teardown related APIs
+     */
+    public abstract RwTypes.RwStatus terminate_vnffg_classifier(
+      Rwsdn.SDNAccount account,
+      string vnffg_classifier_id);
+
+
+
+    /*
+     * Node Related APIs
+     */
+     /* TODO */
+
+    /*
+     * Termination-point Related APIs
+     */
+     /* TODO */
+
+    /*
+     * Link Related APIs
+     */
+     /* TODO */
+    
+  }
+}
+
+
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt
new file mode 100644
index 0000000..357e2ab
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)  # provides rift_install_python_plugin()
+
+rift_install_python_plugin(rwsdn_mock rwsdn_mock.py)  # install the mock SDN provider
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py b/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py
new file mode 100644
index 0000000..2c0ffcc
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py
@@ -0,0 +1,172 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import logging
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import (
+    GObject,
+    RwSdn, # Vala package
+    RwTypes,
+    RwTopologyYang as RwTl,
+    RwsdnYang
+    )
+
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwsdn.mock')
+
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+GRUNT118 = {"name": "grunt118", "ip_addr": "10.66.4.118", "tps": ["eth0"]}
+GRUNT44 = {"name": "grunt44", "ip_addr": "10.66.4.44", "tps": ["eth0"]}
+AS1 = {"name":"AristaSw1", "ip_addr": "10.66.4.54", "tps": ["Ethernet8/7","Ethernet8/8"]}
+NW_NODES = [GRUNT118, GRUNT44, AS1]
+NW_BIDIR_LINKS = [{"src" : ("grunt118","eth0"), "dest" : ("AristaSw1","Ethernet8/7")}, 
+            {"src" : ("grunt44","eth0"), "dest" : ("AristaSw1","Ethernet8/8")}]
+
+
+class DataStore(object):
+    def __init__(self):
+        self.topology = None
+        self.nw = None
+        self.next_mac = 11
+
+    def create_link(self, cfg_src_node, cfg_src_tp, cfg_dest_node, cfg_dest_tp):
+        lnk= self.nw.link.add()
+        lnk.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(cfg_src_node, cfg_src_tp, cfg_dest_node, cfg_dest_tp)
+        lnk.source.source_node = cfg_src_node
+        lnk.source.source_tp = cfg_src_tp
+        lnk.destination.dest_node = cfg_dest_node
+        lnk.destination.dest_tp = cfg_dest_tp
+        # L2 link augmentation
+        lnk.l2_link_attributes.name = cfg_src_tp + cfg_dest_tp
+        lnk.l2_link_attributes.rate = 1000000000.00
+
+    def create_tp(self, node, cfg_tp):
+        tp = node.termination_point.add()
+        tp.tp_id = ("urn:Rift:Lab:{}:{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        tp.l2_termination_point_attributes.mac_address = "00:1e:67:d8:48:" + str(self.next_mac)
+        self.next_mac = self.next_mac + 1
+        tp.l2_termination_point_attributes.tp_state = "in_use"
+        tp.l2_termination_point_attributes.eth_encapsulation = "ethernet"
+
+    def create_node(self, cfg_node):
+        node = self.nw.node.add()
+        node.node_id = cfg_node['name']
+        # L2 Node augmentation
+        node.l2_node_attributes.name = cfg_node['name']
+        node.l2_node_attributes.description = "Host with OVS-DPDK"
+        node.l2_node_attributes.management_address.append(cfg_node['ip_addr'])
+        for cfg_tp in cfg_node['tps']:
+            self.create_tp(node, cfg_tp)
+        
+    def create_default_topology(self):
+        logger.debug('Creating default topology: ')
+
+        self.topology = RwTl.YangData_IetfNetwork()
+        self.nw = self.topology.network.add()
+        self.nw.network_id = "L2HostTopology-Def1"
+        self.nw.server_provided = 'true'
+
+        # L2 Network type augmentation
+        self.nw.network_types.l2_network = self.nw.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.nw.l2_network_attributes.name = "Rift LAB SFC-Demo Host Network"
+
+        for cfg_node in NW_NODES:
+            self.create_node(cfg_node)
+
+        for cfg_link in NW_BIDIR_LINKS:
+            self.create_link(cfg_link['src'][0], cfg_link['src'][1], cfg_link['dest'][0], cfg_link['dest'][1])
+            self.create_link(cfg_link['src'][1], cfg_link['src'][0], cfg_link['dest'][1], cfg_link['dest'][0])
+
+        return self.topology
+ 
+        
+class Resources(object):
+    def __init__(self):
+        self.networks = dict()
+        
+
+class MockPlugin(GObject.Object, RwSdn.Topology):
+    """This class implements the abstract methods in the Topology class.
+    Mock is used for unit testing."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        # Per-key resource holders (not used by the methods visible here).
+        self.resources = collections.defaultdict(Resources)
+        self.datastore = None
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        """Plugin init hook: attach an RwLogger handler and build the
+        default mock topology."""
+        # Add the RwLogger only once, even if do_init is called again.
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    subcategory="rwsdn.mock",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+        # NOTE(review): this account object is constructed but never used
+        # or stored -- looks like dead code; confirm before removing.
+        account = RwsdnYang.SDNAccount()
+        account.name = 'mock'
+        account.account_type = 'mock'
+        account.mock.username = 'rift'
+
+        self.datastore = DataStore()
+        self.topology = self.datastore.create_default_topology()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network_list(self, account):
+        """
+        Returns the list of discovered network
+
+        @param account - a SDN account
+
+        """
+        logger.debug('Get network list: ')
+
+        # The topology is built in do_init; return it when available.
+        if (self.topology):
+            logger.debug('Returning network list: ')
+            return self.topology
+
+        logger.debug('Returning empty network list: ')
+        return None
+
+        
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt
new file mode 100644
index 0000000..239f971
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Pull in the RIFT helper module that defines rift_install_python_plugin().
+include(rift_plugin)
+
+# Install rwsdn_odl.py as the "rwsdn_odl" python plugin.
+rift_install_python_plugin(rwsdn_odl rwsdn_odl.py)
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py b/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py
new file mode 100644
index 0000000..3eb39fc
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py
@@ -0,0 +1,1082 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+
+import requests
+
+import json
+import re
+import socket
+import time
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwsdnYang', '1.0')
+gi.require_version('RwSdn', '1.0')
+gi.require_version('RwTopologyYang','1.0')
+
+from gi.repository import (
+    GObject,
+    RwSdn, # Vala package
+    RwTypes,
+    RwsdnYang, 
+    RwTopologyYang as RwTl,
+    )
+
+import rw_status
+import rwlogger
+
+
+logger = logging.getLogger('rwsdn.sdnodl')
+logger.setLevel(logging.DEBUG)
+
+
+sff_rest_based = True
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class SdnOdlPlugin(GObject.Object, RwSdn.Topology):
+    """OpenDaylight (ODL) implementation of the RwSdn.Topology plugin
+    interface; thin wrappers that delegate to SdnOdl."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self.sdnodl = SdnOdl()
+
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        """Plugin init hook: attach an RwLogger handler (only once)."""
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="odl",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_sdn_creds(self, account):
+        """
+        Validates the sdn account credentials for the specified account.
+        Performs an access to the ODL restconf API. If creds
+        are not valid, returns an error code & reason string
+
+        @param account - a SDN account
+
+        Returns:
+            Validation Code and Details String
+        """
+        #logger.debug('Received validate SDN creds')
+        status = self.sdnodl.validate_account_creds(account)
+        #logger.debug('Done with validate SDN creds: %s', type(status))
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network_list(self, account):
+        """
+        Returns the list of discovered networks
+
+        @param account - a SDN account
+
+        """
+        logger.debug('Received Get network list: ')
+        nwtop = self.sdnodl.get_network_list( account)
+        logger.debug('Done with get network list: %s', type(nwtop))
+        return nwtop
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vnffg_chain(self, account,vnffg_chain):
+        """
+        Creates Service Function chain in ODL
+
+        @param account - a SDN account
+
+        Returns the id/name of the created VNFFG chain.
+        """
+        logger.debug('Received Create VNFFG chain ')
+        vnffg_id = self.sdnodl.create_sfc( account,vnffg_chain)
+        logger.debug('Done with create VNFFG chain with name : %s', vnffg_id)
+        return vnffg_id
+
+    @rwstatus
+    def do_terminate_vnffg_chain(self, account,vnffg_id):
+        """
+        Terminate Service Function chain in ODL
+
+        @param account - a SDN account
+
+        """
+        logger.debug('Received terminate VNFFG chain for id %s ', vnffg_id)
+        # TODO: Currently all the RSP, SFPs , SFFs and SFs are deleted
+        # Need to handle deletion of specific RSP, SFFs, SFs etc
+        self.sdnodl.terminate_all_sfc(account)
+        logger.debug('Done with terminate VNFFG chain with name : %s', vnffg_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vnffg_rendered_paths(self, account):
+        """
+           Get ODL Rendered Service Path List (SFC)
+
+           @param account - a SDN account
+        """
+        vnffg_list = self.sdnodl.get_rsp_list(account)
+        return vnffg_list
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_vnffg_classifier(self, account, vnffg_classifier):
+        """
+           Add VNFFG Classifier
+
+           @param account - a SDN account
+        """
+        classifier_name = self.sdnodl.create_sfc_classifier(account,vnffg_classifier)
+        return classifier_name
+
+    @rwstatus(ret_on_failure=[None])
+    def do_terminate_vnffg_classifier(self, account, vnffg_classifier_name):
+        """
+           Terminate (delete) a VNFFG Classifier
+
+           @param account - a SDN account
+        """
+        self.sdnodl.terminate_sfc_classifier(account,vnffg_classifier_name)
+
+
+class Sff(object):
+    """
+    Create SFF object to hold SFF related details
+    """
+
+    def __init__(self,sff_name, mgmt_address, mgmt_port, dp_address, dp_port,sff_dp_name, sff_br_name=''):
+        self.name = sff_name
+        self.ip = mgmt_address
+        self.sff_rest_port = mgmt_port
+        self.sff_port = dp_port
+        self.dp_name = sff_dp_name
+        self.dp_ip = dp_address
+        self.br_name = sff_br_name
+        self.sf_dp_list = list()
+    
+    def add_sf_dp_to_sff(self,sf_dp):
+        self.sf_dp_list.append(sf_dp)
+
+    def __repr__(self):
+        return 'Name:{},Bridge Name:{}, IP: {}, SF List: {}'.format(self.dp_name,self.br_name, self.ip, self.sf_dp_list) 
+
+class SfDpLocator(object):
+    """
+    Create Service Function Data Plane Locator related Object to hold details related to each DP Locator endpoint
+    """
+    def __init__(self,name,sfdp_id,vnfr_name,vm_id):
+        self.name = name
+        self.port_id = sfdp_id
+        self.vnfr_name = vnfr_name
+        self.vm_id = vm_id
+        # Filled in later via _update_sff_name() once the owning SFF is known.
+        self.sff_name = None
+        self.ovsdb_tp_name = None
+
+    def _update_sff_name(self,sff_name):
+        """Record the name of the SFF this locator is attached to."""
+        self.sff_name = sff_name
+
+    def _update_vnf_params(self,service_function_type,address, port,transport_type):
+        """Record the SF type, data-plane address/port, and the transport
+        type (prefixed with 'service-locator:' as ODL expects)."""
+        self.service_function_type = service_function_type
+        self.address = address
+        self.port = port
+        self.transport_type = "service-locator:{}".format(transport_type)
+
+    def __repr__(self):
+        return 'Name:{},Port id:{}, VNFR ID: {}, VM ID: {}, SFF Name: {}'.format(self.name,self.port_id, self.vnfr_name, self.vm_id,self.sff_name)
+
+class SdnOdl(object):
+    """
+    SDN ODL Class to support REST based API calls
+    """
+
+    # Relative restconf path for the operational network topology.
+    @property
+    def _network_topology_path(self):
+        return 'restconf/operational/network-topology:network-topology'
+
+    # Relative restconf path for the opendaylight node inventory.
+    @property
+    def _node_inventory_path(self):
+        return 'restconf/operational/opendaylight-inventory:nodes'
+
+    def _network_topology_rest_url(self,account):
+        # Full URL: ODL base URL from the account + topology path.
+        return '{}/{}'.format(account.odl.url,self._network_topology_path)
+
+    def _node_inventory_rest_url(self,account):
+        # Full URL: ODL base URL from the account + inventory path.
+        return '{}/{}'.format(account.odl.url,self._node_inventory_path)
+
+    def _get_rest_url(self,account, rest_path):
+        # Generic helper: base URL + an arbitrary restconf path.
+        return '{}/{}'.format(account.odl.url,rest_path)
+
+
+    def _get_peer_termination_point(self,node_inv,tp_id):
+        """Return (node id, node-connector id) of the inventory connector
+        whose flow-node-inventory:name equals tp_id, or (None, None)."""
+        for node in node_inv['nodes']['node']:
+            if "node-connector" in node and len(node['node-connector']) > 0:
+                for nodec in node['node-connector']:
+                    if ("flow-node-inventory:name" in nodec and nodec["flow-node-inventory:name"] == tp_id):
+                        return(node['id'], nodec['id'])
+        return (None,None)
+
+    def _get_termination_point_mac_address(self,node_inv,tp_id):
+        """Return the hardware (MAC) address of the inventory connector
+        matching tp_id, or None (implicitly) if not found."""
+        for node in node_inv['nodes']['node']:
+            if "node-connector" in node and len(node['node-connector']) > 0:
+                for nodec in node['node-connector']:
+                    if ("flow-node-inventory:name" in nodec and nodec["flow-node-inventory:name"] == tp_id):
+                        return nodec.get("flow-node-inventory:hardware-address")
+
+    def _add_host(self,ntwk,node,term_point,vmid,node_inv):
+        """Add a host node for vmid to the network (if not already there),
+        create its termination point from the ovsdb external-ids, and add
+        a link in each direction to the switch termination point."""
+        for ntwk_node in ntwk.node:
+            if ntwk_node.node_id ==  vmid:
+                break
+        else:
+            # for/else: only runs when no existing node matched vmid.
+            ntwk_node = ntwk.node.add()
+            if "ovsdb:bridge-name" in node:
+                ntwk_node.rw_node_attributes.ovs_bridge_name = node["ovsdb:bridge-name"]
+            ntwk_node.node_id = vmid
+            # 'iface-id' external-id gives the host-side TP identifier.
+            intf_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'iface-id']
+            if intf_id:
+                ntwk_node_tp = ntwk_node.termination_point.add()
+                ntwk_node_tp.tp_id = intf_id[0]['external-id-value']
+                att_mac = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'attached-mac']
+                if att_mac:
+                    ntwk_node_tp.l2_termination_point_attributes.mac_address = att_mac[0]['external-id-value']
+                peer_node,peer_node_tp = self._get_peer_termination_point(node_inv,term_point['tp-id'])
+                if peer_node and peer_node_tp:
+                    # Host -> switch link.
+                    nw_lnk = ntwk.link.add()
+                    nw_lnk.source.source_tp = ntwk_node_tp.tp_id
+                    nw_lnk.source.source_node = ntwk_node.node_id
+                    nw_lnk.destination.dest_tp = term_point['tp-id']
+                    nw_lnk.destination.dest_node = node['node-id']
+                    nw_lnk.link_id = peer_node_tp + '-' + 'source'
+
+                    # Switch -> host link (reverse direction).
+                    nw_lnk = ntwk.link.add()
+                    nw_lnk.source.source_tp = term_point['tp-id']
+                    nw_lnk.source.source_node = node['node-id']
+                    nw_lnk.destination.dest_tp = ntwk_node_tp.tp_id
+                    nw_lnk.destination.dest_node = ntwk_node.node_id
+                    nw_lnk.link_id = peer_node_tp + '-' + 'dest'
+
+    def _get_address_from_node_inventory(self,node_inv,node_id):
+        """Return the flow-node-inventory:ip-address of node_id from the
+        inventory, or None when the node is not present."""
+        for node in node_inv['nodes']['node']:
+            if node['id'] == node_id:
+                return node["flow-node-inventory:ip-address"]
+        return None
+
+    def _fill_network_list(self,nw_topo,node_inventory):
+        """
+        Fill Topology related information
+
+        Converts the raw restconf JSON (network topology + node inventory)
+        into an RwTopologyYang YangData_IetfNetwork: one network per
+        topology entry, with nodes, termination points, host nodes derived
+        from ovsdb 'vm-id' external-ids, and links.
+        """
+        nwtop = RwTl.YangData_IetfNetwork()
+
+        for topo in nw_topo['network-topology']['topology']:
+            if ('node' in topo and len(topo['node']) > 0):
+                ntwk = nwtop.network.add()
+                ntwk.network_id = topo['topology-id']
+                ntwk.server_provided = True
+                for node in topo['node']:
+                    if ('termination-point' in node and len(node['termination-point']) > 0):
+                        ntwk_node = ntwk.node.add()
+                        ntwk_node.node_id = node['node-id']
+                        # Management address comes from the node inventory.
+                        addr = self._get_address_from_node_inventory(node_inventory,ntwk_node.node_id)
+                        if addr:
+                            ntwk_node.l2_node_attributes.management_address.append(addr)
+                        for term_point in node['termination-point']:
+                            ntwk_node_tp = ntwk_node.termination_point.add()
+                            ntwk_node_tp.tp_id = term_point['tp-id']
+                            mac_address = self._get_termination_point_mac_address(node_inventory,term_point['tp-id'])
+                            if mac_address:
+                                ntwk_node_tp.l2_termination_point_attributes.mac_address = mac_address
+                            if 'ovsdb:interface-external-ids' in term_point:
+                                # A 'vm-id' external-id marks a VM attached
+                                # to this TP; add it as a host node.
+                                vm_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'vm-id']
+                                if vm_id:
+                                    vmid = vm_id[0]['external-id-value']
+                                    self._add_host(ntwk,node,term_point,vmid,node_inventory)
+                if ('link' in topo and len(topo['link']) > 0):
+                    for link in topo['link']:
+                        nw_link = ntwk.link.add()
+                        if 'destination' in link:
+                            nw_link.destination.dest_tp = link['destination'].get('dest-tp')
+                            nw_link.destination.dest_node = link['destination'].get('dest-node')
+                        if 'source' in link:
+                            nw_link.source.source_node = link['source'].get('source-node')
+                            nw_link.source.source_tp = link['source'].get('source-tp')
+                        nw_link.link_id = link.get('link-id')
+        return nwtop
+
+
+    def validate_account_creds(self, account):
+        """
+            Validate the SDN account credentials by accessing the rest API using the provided credentials
+        """
+        status = RwsdnYang.SdnConnectionStatus()
+        url = '{}/{}'.format(account.odl.url,"restconf")
+        try:
+            r=requests.get(url,auth=(account.odl.username,account.odl.password))
+            r.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            msg = "SdnOdlPlugin: SDN account credential validation failed. Exception: %s", str(e)
+            #logger.error(msg)
+            print(msg)
+            status.status = "failure"
+            status.details = "Invalid Credentials: %s" % str(e)
+        except Exception as e:
+            msg = "SdnPdlPlugin: SDN connection failed. Exception: %s", str(e)
+            #logger.error(msg)
+            print(msg)
+            status.status = "failure"
+            status.details = "Connection Failed (Invlaid URL): %s" % str(e)
+        else:
+            print("SDN Successfully connected")
+            status.status = "success"
+            status.details = "Connection was successful"
+
+        return status
+
+    def get_network_list(self, account):
+        """
+           Get the networks details from ODL
+        """
+        # Fetch the operational network topology ...
+        url = self._network_topology_rest_url(account)
+        r=requests.get(url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+        nw_topo = r.json()
+
+        # ... and the node inventory (for mgmt/MAC addresses), then merge
+        # both into an RwTopologyYang network list.
+        url = self._node_inventory_rest_url(account)
+        r = requests.get(url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+        node_inventory = r.json()
+        return self._fill_network_list(nw_topo,node_inventory)
+
+    # --- ODL SFC restconf paths -------------------------------------
+    # Each property below returns a restconf path (relative to the
+    # account's base URL) for one SFC-related resource. Singular names
+    # (*_path) contain a '{}' placeholder for the object name; plural
+    # names (*_paths / *s_path) address the whole collection.
+    @property
+    def _service_functions_path(self):
+        return 'restconf/config/service-function:service-functions'
+
+    @property
+    def _service_function_path(self):
+        return 'restconf/config/service-function:service-functions/service-function/{}'
+
+    @property
+    def _service_function_forwarders_path(self):
+        return 'restconf/config/service-function-forwarder:service-function-forwarders'
+
+    @property
+    def _service_function_forwarder_path(self):
+        return 'restconf/config/service-function-forwarder:service-function-forwarders/service-function-forwarder/{}'
+
+    @property
+    def _service_function_chains_path(self):
+        return 'restconf/config/service-function-chain:service-function-chains'
+
+    @property
+    def _service_function_chain_path(self):
+        return 'restconf/config/service-function-chain:service-function-chains/service-function-chain/{}'
+
+    @property
+    def _sfp_metadata_path(self):
+        return 'restconf/config/service-function-path-metadata:service-function-metadata/context-metadata/{}'
+
+    @property
+    def _sfps_metadata_path(self):
+        return 'restconf/config/service-function-path-metadata:service-function-metadata'
+
+    @property
+    def _sfps_path(self):
+        return 'restconf/config/service-function-path:service-function-paths'
+
+    @property
+    def _sfp_path(self):
+        return 'restconf/config/service-function-path:service-function-paths/service-function-path/{}'
+
+
+    # RPC operations for rendered service paths (POST targets).
+    @property
+    def _create_rsp_path(self):
+        return 'restconf/operations/rendered-service-path:create-rendered-path'
+
+    @property
+    def _delete_rsp_path(self):
+        return 'restconf/operations/rendered-service-path:delete-rendered-path'
+
+
+    # Operational (read-only) rendered-service-path state.
+    @property
+    def _get_rsp_paths(self):
+        return 'restconf/operational/rendered-service-path:rendered-service-paths'
+
+    @property
+    def _get_rsp_path(self):
+        return 'restconf/operational/rendered-service-path:rendered-service-paths/rendered-service-path/{}'
+
+    @property
+    def _access_list_path(self):
+        return 'restconf/config/ietf-access-control-list:access-lists/acl/{}'
+
+    @property
+    def _service_function_classifier_path(self):
+        return 'restconf/config/service-function-classifier:service-function-classifiers/service-function-classifier/{}'
+
+    @property
+    def _access_lists_path(self):
+        return 'restconf/config/ietf-access-control-list:access-lists'
+
+    @property
+    def _service_function_classifiers_path(self):
+        return 'restconf/config/service-function-classifier:service-function-classifiers'
+
+
+    def _create_sf(self,account,vnffg_chain,sf_dp_list):
+        "Create SF"
+        # For every VNFR in the chain: skip it if ODL already has a
+        # service-function with that name, otherwise build the SF JSON
+        # (with one data-plane locator per VDU) and PUT it to ODL.
+        sf_json = {}
+
+        for vnf in vnffg_chain.vnf_chain_path:
+            for vnfr in vnf.vnfr_ids:
+                sf_url = self._get_rest_url(account,self._service_function_path.format(vnfr.vnfr_name))
+                print(sf_url)
+                r=requests.get(sf_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+                # If the SF is not found; create new SF
+                if r.status_code == 200:
+                    logger.info("SF with name %s is already present in ODL. Skipping update", vnfr.vnfr_name)
+                    continue
+                elif r.status_code != 404:
+                    r.raise_for_status()
+
+                sf_dict = {}
+                sf_dict['name'] = vnfr.vnfr_name
+                sf_dict['nsh-aware'] = vnf.nsh_aware
+                sf_dict['type'] = vnf.service_function_type
+                sf_dict['ip-mgmt-address'] = vnfr.mgmt_address
+                sf_dict['rest-uri'] = 'http://{}:{}'.format(vnfr.mgmt_address, vnfr.mgmt_port)
+
+                sf_dict['sf-data-plane-locator'] = list()
+                for vdu in vnfr.vdu_list:
+                    sf_dp = {}
+                    if vdu.port_id in sf_dp_list.keys():
+                        sf_dp_entry = sf_dp_list[vdu.port_id]
+                        sf_dp['name'] = sf_dp_entry.name
+                        sf_dp['ip'] = vdu.address
+                        sf_dp['port'] = vdu.port
+                        sf_dp['transport'] = "service-locator:{}".format(vnf.transport_type)
+                        # Prefer an explicitly configured SFF on the VNFR;
+                        # otherwise use the SFF discovered for this port.
+                        if vnfr.sff_name:
+                            sf_dp['service-function-forwarder'] = vnfr.sff_name
+                        else:
+                            sff_name = sf_dp_entry.sff_name
+                            if sff_name is None:
+                                logger.error("SFF not found for port %s in SF %s", vdu.port_id, vnfr.vnfr_name)
+                            sf_dp['service-function-forwarder'] = sff_name
+                            sf_dp['service-function-ovs:ovs-port'] = dict()
+                            if sf_dp_entry.ovsdb_tp_name is not None:
+                                sf_dp['service-function-ovs:ovs-port']['port-id'] =  sf_dp_entry.ovsdb_tp_name
+                        sf_dict['sf-data-plane-locator'].append(sf_dp)
+                    else:
+                        logger.error("Port %s not found in SF DP list",vdu.port_id)
+
+                sf_json['service-function'] = sf_dict
+                sf_data = json.dumps(sf_json)
+                sf_url = self._get_rest_url(account,self._service_function_path.format(vnfr.vnfr_name))
+                print(sf_url)
+                print(sf_data)
+                r=requests.put(sf_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sf_data)
+                r.raise_for_status()
+
+
+    def _create_sff(self,account,vnffg_chain,sff):
+        "Create SFF"
+        # If the SFF already exists in ODL, only append any missing SFs to
+        # its service-function-dictionary; otherwise build and PUT a full
+        # service-function-forwarder object.
+        sff_json = {}
+        sff_dict = {}
+        #sff_dp_name = "SFF1" + '-' + 'DP1'
+        sff_dp_name = sff.dp_name
+
+        sff_url = self._get_rest_url(account,self._service_function_forwarder_path.format(sff.name))
+        print(sff_url)
+        r=requests.get(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+        # If the SFF is not found; create new SF
+        if r.status_code == 200:
+            logger.info("SFF with name %s is already present in ODL. Skipping full update", sff.name)
+            sff_dict = r.json()
+            sff_updated = False
+            for sf_dp in sff.sf_dp_list:
+                for sff_sf in sff_dict['service-function-forwarder'][0]['service-function-dictionary']:
+                    if sf_dp.vnfr_name == sff_sf['name']:
+                        logger.info("SF with name %s is already found in SFF %s SF Dictionay. Skipping update",sf_dp.vnfr_name,sff.name)
+                        break
+                else:
+                    # for/else: this SF was not in the dictionary -- add it.
+                    logger.info("SF with name %s is not found in SFF %s SF Dictionay",sf_dp.vnfr_name, sff.name)
+                    sff_updated = True
+                    sff_sf_dict = {}
+                    sff_sf_dp_loc = {}
+                    sff_sf_dict['name'] = sf_dp.vnfr_name
+
+                    # Below two lines are enabled only for ODL Beryillium
+                    sff_sf_dp_loc['sff-dpl-name'] = sff_dp_name
+                    sff_sf_dp_loc['sf-dpl-name'] = sf_dp.name
+
+                    sff_sf_dict['sff-sf-data-plane-locator'] = sff_sf_dp_loc
+                    sff_dict['service-function-forwarder'][0]['service-function-dictionary'].append(sff_sf_dict)
+            if sff_updated is True:
+                sff_data = json.dumps(sff_dict)
+                print(sff_data)
+                r=requests.put(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sff_data)
+                r.raise_for_status()
+            return
+        elif r.status_code != 404:
+            r.raise_for_status()
+
+        # SFF does not exist: build a complete SFF object.
+        sff_name = sff.name
+        sff_ip = sff.ip
+        sff_dp_ip = sff.dp_ip
+        sff_port = sff.sff_port
+        sff_bridge_name = ''
+        sff_rest_port = sff.sff_rest_port
+        sff_ovs_op = {}
+        # Non-REST (OVS) SFFs carry ovs-options instead of a rest-uri;
+        # controlled by the module-level sff_rest_based flag.
+        if sff_rest_based is False:
+            sff_bridge_name = sff.br_name
+            sff_ovs_op  = {"key": "flow",
+                       "nshc1": "flow",
+                       "nsp": "flow",
+                       "remote-ip": "flow",
+                       "dst-port": sff_port,
+                       "nshc3": "flow",
+                       "nshc2": "flow",
+                       "nshc4": "flow",
+                       "nsi": "flow"}
+
+
+        sff_dict['name'] = sff_name
+        sff_dict['service-node'] = ''
+        sff_dict['ip-mgmt-address'] = sff_ip
+        if sff_rest_based:
+            sff_dict['rest-uri'] = 'http://{}:{}'.format(sff_ip, sff_rest_port)
+        else:
+            sff_dict['service-function-forwarder-ovs:ovs-bridge'] = {"bridge-name": sff_bridge_name}
+        sff_dict['service-function-dictionary'] = list()
+        for sf_dp in sff.sf_dp_list:
+            sff_sf_dict = {}
+            sff_sf_dp_loc = {}
+            sff_sf_dict['name'] = sf_dp.vnfr_name
+
+            # Below set of lines are reqd for Lithium
+            #sff_sf_dict['type'] = sf_dp.service_function_type
+            #sff_sf_dp_loc['ip'] = sf_dp.address
+            #sff_sf_dp_loc['port'] = sf_dp.port
+            #sff_sf_dp_loc['transport'] = sf_dp.transport_type
+            #sff_sf_dp_loc['service-function-forwarder-ovs:ovs-bridge'] = {}
+
+            # Below two lines are enabled only for ODL Beryillium
+            sff_sf_dp_loc['sff-dpl-name'] = sff_dp_name
+            sff_sf_dp_loc['sf-dpl-name'] = sf_dp.name
+
+            sff_sf_dict['sff-sf-data-plane-locator'] = sff_sf_dp_loc
+            sff_dict['service-function-dictionary'].append(sff_sf_dict)
+
+        # Single VXLAN-GPE data-plane locator for the SFF itself.
+        sff_dict['sff-data-plane-locator'] = list()
+        sff_dp = {}
+        dp_loc = {}
+        sff_dp['name'] = sff_dp_name
+        dp_loc['ip'] = sff_dp_ip
+        dp_loc['port'] = sff_port
+        dp_loc['transport'] = 'service-locator:vxlan-gpe'
+        sff_dp['data-plane-locator'] = dp_loc
+        if sff_rest_based is False:
+            sff_dp['service-function-forwarder-ovs:ovs-options'] = sff_ovs_op
+            #sff_dp["service-function-forwarder-ovs:ovs-bridge"] = {'bridge-name':sff_bridge_name}
+            sff_dp["service-function-forwarder-ovs:ovs-bridge"] = {}
+        sff_dict['sff-data-plane-locator'].append(sff_dp)
+
+        sff_json['service-function-forwarder'] = sff_dict
+        sff_data = json.dumps(sff_json)
+        print(sff_data)
+        r=requests.put(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sff_data)
+        r.raise_for_status()
+
+    def _create_sfc(self,account,vnffg_chain):
+        "Create the Service Function Chain (SFC) in ODL from the VNFFG chain definition"
+        sfc_json = {}
+        sfc_dict = {}
+        sfc_dict['name'] = vnffg_chain.name
+        sfc_dict['sfc-service-function'] = list()
+        # Order VNFs by their position in the chain before building the SF list
+        vnf_chain_list = sorted(vnffg_chain.vnf_chain_path, key = lambda x: x.order)
+        for vnf in vnf_chain_list:
+            sfc_sf_dict = {}
+            # NOTE(review): 'name' is set to the service function TYPE rather than a
+            # per-instance name; SFCs reference SF types in ODL, but confirm intended.
+            sfc_sf_dict['name'] = vnf.service_function_type
+            sfc_sf_dict['type'] = vnf.service_function_type
+            sfc_sf_dict['order'] = vnf.order 
+            sfc_dict['sfc-service-function'].append(sfc_sf_dict)
+        sfc_json['service-function-chain'] = sfc_dict
+        sfc_data = json.dumps(sfc_json)
+        sfc_url = self._get_rest_url(account,self._service_function_chain_path.format(vnffg_chain.name))
+        print(sfc_url)
+        print(sfc_data)
+        # PUT the chain to ODL RESTCONF; raises requests.HTTPError on failure
+        r=requests.put(sfc_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfc_data)
+        r.raise_for_status()
+       
+    def _create_sfp_metadata(self,account,sfc_classifier):
+        " Create SFP context-metadata (NSH context headers) for the given classifier"
+        sfp_meta_json = {}
+        sfp_meta_dict = {}
+        sfp_meta_dict['name'] = sfc_classifier.name
+        # Only include the context headers that were actually supplied
+        if sfc_classifier.vnffg_metadata.ctx1:
+            sfp_meta_dict['context-header1'] = sfc_classifier.vnffg_metadata.ctx1
+        if sfc_classifier.vnffg_metadata.ctx2:
+            sfp_meta_dict['context-header2'] = sfc_classifier.vnffg_metadata.ctx2
+        if sfc_classifier.vnffg_metadata.ctx3:
+            sfp_meta_dict['context-header3'] = sfc_classifier.vnffg_metadata.ctx3
+        if sfc_classifier.vnffg_metadata.ctx4:
+            sfp_meta_dict['context-header4'] = sfc_classifier.vnffg_metadata.ctx4
+
+        sfp_meta_json['context-metadata'] = sfp_meta_dict
+        sfp_meta_data = json.dumps(sfp_meta_json)
+        sfp_meta_url = self._get_rest_url(account,self._sfp_metadata_path.format(sfc_classifier.name))
+        print(sfp_meta_url)
+        print(sfp_meta_data)
+        r=requests.put(sfp_meta_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfp_meta_data)
+        r.raise_for_status()
+
+    def _create_sfp(self,account,vnffg_chain, sym_chain=False,classifier_name=None,vnffg_metadata_name=None):
+        "Create the Service Function Path (SFP) referencing the SFC plus optional classifier/metadata"
+        sfp_json = {}
+        sfp_dict = {}
+        sfp_dict['name'] = vnffg_chain.name
+        # SFP and its parent chain deliberately share the same name
+        sfp_dict['service-chain-name'] = vnffg_chain.name
+        sfp_dict['symmetric'] = sym_chain
+        sfp_dict['transport-type'] = 'service-locator:vxlan-gpe'
+        if vnffg_metadata_name:
+            sfp_dict['context-metadata'] = vnffg_metadata_name 
+        if classifier_name: 
+            sfp_dict['classifier'] = classifier_name 
+
+        sfp_json['service-function-path'] = sfp_dict
+        sfp_data = json.dumps(sfp_json)
+        sfp_url = self._get_rest_url(account,self._sfp_path.format(vnffg_chain.name))
+        print(sfp_url)
+        print(sfp_data)
+        r=requests.put(sfp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfp_data)
+        r.raise_for_status()
+
+    def _create_rsp(self,account,vnffg_chain_name, sym_chain=True):
+        "Create a Rendered Service Path for the named SFP via the ODL RPC; returns the new RSP name"
+        rsp_json = {}
+        rsp_input = {}
+        # NOTE(review): this empty-dict assignment is immediately overwritten below
+        rsp_json['input'] = {}
+        rsp_input['name'] = vnffg_chain_name
+        rsp_input['parent-service-function-path'] = vnffg_chain_name
+        rsp_input['symmetric'] = sym_chain
+
+        rsp_json['input'] = rsp_input
+        rsp_data = json.dumps(rsp_json)
+        # Cache the last RSP request payload on the instance -- confirm where it is consumed
+        self._rsp_data = rsp_json
+        rsp_url = self._get_rest_url(account,self._create_rsp_path)
+        print(rsp_url)
+        print(rsp_data)
+        # RSP creation is an RPC operation, hence POST rather than PUT
+        r=requests.post(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=rsp_data)
+        r.raise_for_status()
+        print(r.json())
+        output_json = r.json()
+        return output_json['output']['name']
+        
+    def _get_sff_list_for_chain(self, account,sf_dp_list):
+        """
+        Get List of all SFF that needs to be created based on VNFs included in VNFFG chain.
+
+        Walks the ODL network-topology, matching OVS termination points to the
+        entries in sf_dp_list via the 'iface-id' (port id) and 'vm-id'
+        external-ids, then derives one SFF per managing ovsdb node/bridge.
+        Side effect: fills in ovsdb_tp_name and sff_name on matched sf_dp_list
+        entries.  Returns a dict of SFF name -> Sff.
+        """
+
+        sff_list = {}
+        if sf_dp_list is None:
+            logger.error("VM List for vnffg chain is empty while trying to get SFF list")
+        url = self._network_topology_rest_url(account)
+        r=requests.get(url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+        nw_topo = r.json()
+
+        for topo in nw_topo['network-topology']['topology']:
+            if ('node' in topo and len(topo['node']) > 0):
+                for node in topo['node']:
+                    if ('termination-point' in node and len(node['termination-point']) > 0):
+                        for term_point in node['termination-point']:
+                            if 'ovsdb:interface-external-ids' in term_point:
+                                # A port is only relevant if it carries BOTH the
+                                # 'vm-id' and 'iface-id' external-ids and they match
+                                # an entry in sf_dp_list
+                                vm_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'vm-id']
+                                if len(vm_id) == 0:
+                                    continue
+                                vmid = vm_id[0]['external-id-value']
+                                intf_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'iface-id']
+                                if len(intf_id) == 0:
+                                    continue 
+                                intfid = intf_id[0]['external-id-value'] 
+                                if intfid not in sf_dp_list.keys():
+                                    continue
+                                if sf_dp_list[intfid].vm_id != vmid:
+                                    logger.error("Intf ID %s is not present in VM %s", intfid, vmid)  
+                                    continue 
+                                sf_dp_list[intfid].ovsdb_tp_name = term_point['ovsdb:name']
+                           
+                                # Resolve the ovsdb node managing this bridge to
+                                # learn the SFF's management and data-plane IPs
+                                if 'ovsdb:managed-by' in node:
+                                    # Extract the node-id from the instance-identifier string
+                                    rr=re.search('network-topology:node-id=\'([-\w\:\/]*)\'',node['ovsdb:managed-by'])
+                                    node_id = rr.group(1)
+                                    ovsdb_node = [node  for node in topo['node'] if node['node-id'] == node_id]
+                                    if ovsdb_node:
+                                        if 'ovsdb:connection-info' in ovsdb_node[0]:
+                                            sff_ip = ovsdb_node[0]['ovsdb:connection-info']['local-ip']
+                                            sff_br_name = node['ovsdb:bridge-name']
+                                            sff_br_uuid = node['ovsdb:bridge-uuid']
+                                            sff_dp_ip = sff_ip
+
+                                            # Prefer the OVS 'local_ip' other-config
+                                            # as the data-plane IP when configured
+                                            if 'ovsdb:openvswitch-other-configs' in  ovsdb_node[0]: 
+                                                for other_key in ovsdb_node[0]['ovsdb:openvswitch-other-configs']:
+                                                    if other_key['other-config-key'] == 'local_ip':
+                                                        local_ip_str = other_key['other-config-value']
+                                                        sff_dp_ip = local_ip_str.split(',')[0]
+                                                        break
+
+                                            sff_name = socket.getfqdn(sff_ip)
+                                            # NOTE(review): sff_list is keyed by sff_name,
+                                            # but this membership test uses the bridge UUID,
+                                            # so it can never match and the else branch
+                                            # re-creates/overwrites the SFF each time --
+                                            # confirm whether the key should be sff_name.
+                                            if sff_br_uuid in sff_list:
+                                                sff_list[sff_name].add_sf_dp_to_sff(sf_dp_list[intfid])
+                                                sf_dp_list[intfid]._update_sff_name(sff_name)
+                                            else:
+                                                sff_dp_ip = sff_ip   #overwrite sff_dp_ip to SFF ip for now
+                                                sff_list[sff_name] = Sff(sff_name,sff_ip,6000, sff_dp_ip, 4790,sff_br_uuid,sff_br_name)
+                                                sf_dp_list[intfid]._update_sff_name(sff_name)
+                                                sff_list[sff_name].add_sf_dp_to_sff(sf_dp_list[intfid])
+        return sff_list
+                                         
+
+    def _get_sf_dp_list_for_chain(self,account,vnffg_chain):
+        """
+        Get list of all Service Function Data Plane Locators present in VNFFG 
+        useful for easy reference while creating SF and SFF
+
+        Returns a dict keyed by VDU port id -> SfDpLocator.
+        """
+        sfdp_list = {}
+        for vnf in vnffg_chain.vnf_chain_path:
+            for vnfr in vnf.vnfr_ids:
+                for vdu in vnfr.vdu_list:
+                    sfdp = SfDpLocator(vdu.name,vdu.port_id,vnfr.vnfr_name, vdu.vm_id)
+                    # Carry the VNF-level SF attributes down onto the locator
+                    sfdp._update_vnf_params(vnf.service_function_type, vdu.address, vdu.port, vnf.transport_type)
+                    # Preserve an explicitly configured SFF association, if any
+                    if vnfr.sff_name:
+                        sfdp._update_sff_name(vnfr.sff_name)
+                    sfdp_list[vdu.port_id] = sfdp 
+        return sfdp_list
+
+    def create_sfc(self, account, vnffg_chain):
+        "Create the full SFC chain in ODL: SFs, SFFs, SFC, SFP, RSPs and re-applied classifiers"
+
+        sff_list = {}
+        sf_dp_list = {}
+
+        # Build the port-id -> SF data-plane-locator map for every VDU in the chain
+        sf_dp_list = self._get_sf_dp_list_for_chain(account,vnffg_chain)
+
+        # NOTE(review): sff_rest_based is not defined in this method --
+        # presumably a module-level flag; confirm it is in scope at runtime.
+        if sff_rest_based is False and len(vnffg_chain.sff) == 0:
+            # Get the list of all SFFs required for vnffg chain
+            sff_list = self._get_sff_list_for_chain(account,sf_dp_list)
+
+        # Explicitly configured SFFs on the chain take precedence over discovery
+        for sff in vnffg_chain.sff:
+          sff_list[sff.name] = Sff(sff.name, sff.mgmt_address,sff.mgmt_port,sff.dp_endpoints[0].address, sff.dp_endpoints[0].port, sff.name)
+          for _,sf_dp in sf_dp_list.items():
+              if sf_dp.sff_name and sf_dp.sff_name == sff.name:
+                  sff_list[sff.name].add_sf_dp_to_sff(sf_dp) 
+
+        #Create all the SF in VNFFG chain
+        self._create_sf(account,vnffg_chain,sf_dp_list)
+
+        for _,sff in sff_list.items():
+            self._create_sff(account,vnffg_chain,sff)
+
+
+        self._create_sfc(account,vnffg_chain)
+
+        # NOTE(review): classifier_name doubles as the metadata name here -- confirm intended
+        self._create_sfp(account,vnffg_chain,classifier_name=vnffg_chain.classifier_name,
+                                   vnffg_metadata_name=vnffg_chain.classifier_name)
+
+        ## Update to SFF could have deleted some RSP; so get list of SFP and 
+        ## check RSP exists for same and create any as necessary
+        #rsp_name = self._create_rsp(account,vnffg_chain)
+        #return rsp_name
+        self._create_all_rsps(account)
+        self._recreate_all_sf_classifiers(account)
+        return vnffg_chain.name
+
+    def _recreate_all_sf_classifiers(self,account):
+        """
+        Re create all SF classifiers: read the current set, delete it, then
+        PUT the identical content back so ODL re-renders classifier state.
+        """
+        sfcl_url = self._get_rest_url(account,self._service_function_classifiers_path)
+        print(sfcl_url)
+        #Get the classifier
+        r=requests.get(sfcl_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+        if r.status_code == 200:
+            print(r)
+            sfcl_json = r.json()
+        elif r.status_code == 404:
+            # No classifiers configured yet -- nothing to re-create
+            return         
+        else: 
+            r.raise_for_status()
+
+        #Delete the classifiers and re-add same back
+        r=requests.delete(sfcl_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+        #Readd it back
+        # NOTE(review): fixed 3s delay to let ODL settle after the delete --
+        # fragile; confirm whether a readiness check is possible instead
+        time.sleep(3)
+        print(sfcl_json)
+        sfcl_data = json.dumps(sfcl_json)
+        r=requests.put(sfcl_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfcl_data)
+        r.raise_for_status()
+
+    def _create_all_rsps(self,account):
+        """
+        Create all the RSPs for SFP found: for every Service Function Path
+        known to ODL, create a Rendered Service Path if one does not already
+        exist (detected via a 404 on the per-SFP RSP GET).
+        """
+        sfps_url = self._get_rest_url(account,self._sfps_path)
+        r=requests.get(sfps_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+        r.raise_for_status()
+        sfps_json = r.json()
+        if 'service-function-path' in sfps_json['service-function-paths']:
+            for sfp in sfps_json['service-function-paths']['service-function-path']:
+                rsp_url = self._get_rest_url(account,self._get_rsp_path.format(sfp['name']))
+                r = requests.get(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+                if r.status_code == 404:
+                    # Create the RSP
+                    logger.info("Creating RSP for Service Path with name %s",sfp['name'])
+                    self._create_rsp(account,sfp['name'])
+
+    def delete_all_sf(self, account):
+        "Delete all the SFs (collection-level DELETE on the service-functions container)"
+        sf_url = self._get_rest_url(account,self._service_functions_path)
+        print(sf_url)
+        r=requests.delete(sf_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+
+    def delete_all_sff(self, account):
+        "Delete all the SFFs (collection-level DELETE on the service-function-forwarders container)"
+        sff_url = self._get_rest_url(account,self._service_function_forwarders_path)
+        print(sff_url)
+        r=requests.delete(sff_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def delete_all_sfc(self, account):
+        "Delete all the SFCs (collection-level DELETE on the service-function-chains container)"
+        sfc_url = self._get_rest_url(account,self._service_function_chains_path)
+        print(sfc_url)
+        r=requests.delete(sfc_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def delete_all_sfp_metadata(self, account):
+        "Delete all the SFPs metadata (collection-level DELETE on the context-metadata container)"
+        sfp_metadata_url = self._get_rest_url(account,self._sfps_metadata_path)
+        print(sfp_metadata_url)
+        r=requests.delete(sfp_metadata_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def delete_all_sfp(self, account):
+        "Delete all the SFPs (collection-level DELETE on the service-function-paths container)"
+        sfp_url = self._get_rest_url(account,self._sfps_path)
+        print(sfp_url)
+        r=requests.delete(sfp_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def delete_all_rsp(self, account):
+        "Delete all the RSP via the ODL delete-rendered-path RPC, one POST per path"
+        #rsp_list = self.get_rsp_list(account)
+        url = self._get_rest_url(account,self._get_rsp_paths)
+        print(url)
+        r = requests.get(url,auth=(account.odl.username,account.odl.password)) 
+        r.raise_for_status()
+        print(r.json())
+        rsp_list = r.json()
+
+        # Rendered paths have no collection-level DELETE; each one is removed
+        # individually through the RPC below.
+        #for vnffg in rsp_list.vnffg_rendered_path: 
+        for sfc_rsp in rsp_list['rendered-service-paths']['rendered-service-path']:
+            rsp_json = {}
+            rsp_input = {}
+            # NOTE(review): this empty-dict assignment is immediately overwritten below
+            rsp_json['input'] = {}
+            rsp_input['name'] = sfc_rsp['name']
+
+            rsp_json['input'] = rsp_input
+            rsp_data = json.dumps(rsp_json)
+            self._rsp_data = rsp_json
+            rsp_url = self._get_rest_url(account,self._delete_rsp_path)
+            print(rsp_url)
+            print(rsp_data)
+
+            r=requests.post(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=rsp_data)
+            r.raise_for_status()
+            print(r.json())
+            #output_json = r.json()
+            #return output_json['output']['name']
+            
+    def terminate_all_sfc(self, account):
+        "Terminate SFC chain -- tear down in reverse dependency order: RSPs first, SFs last"
+        self.delete_all_rsp(account)
+        self.delete_all_sfp(account)
+        self.delete_all_sfc(account)
+        self.delete_all_sff(account)
+        self.delete_all_sf(account)
+
+    def _fill_rsp_list(self,sfc_rsp_list,sff_list):
+        # Convert raw ODL rendered-service-path JSON into RwsdnYang
+        # VNFFGRenderedPaths messages, resolving each hop's SFF data-plane
+        # ip/port from the supplied SFF list.
+        vnffg_rsps = RwsdnYang.VNFFGRenderedPaths()
+        for sfc_rsp in sfc_rsp_list['rendered-service-paths']['rendered-service-path']:
+            rsp = vnffg_rsps.vnffg_rendered_path.add()
+            rsp.name = sfc_rsp['name']
+            rsp.path_id = sfc_rsp['path-id']
+            for sfc_rsp_hop in sfc_rsp['rendered-service-path-hop']:
+                rsp_hop = rsp.rendered_path_hop.add()
+                rsp_hop.hop_number =  sfc_rsp_hop['hop-number']
+                rsp_hop.service_index = sfc_rsp_hop['service-index']
+                rsp_hop.vnfr_name =  sfc_rsp_hop['service-function-name']
+                rsp_hop.service_function_forwarder.name = sfc_rsp_hop['service-function-forwarder']
+                # Look up the forwarder's FIRST data-plane locator for ip/port
+                for sff in sff_list['service-function-forwarders']['service-function-forwarder']:
+                    if sff['name'] == rsp_hop.service_function_forwarder.name:
+                        rsp_hop.service_function_forwarder.ip_address = sff['sff-data-plane-locator'][0]['data-plane-locator']['ip']
+                        rsp_hop.service_function_forwarder.port = sff['sff-data-plane-locator'][0]['data-plane-locator']['port']
+                        break
+        return vnffg_rsps
+             
+
+    def get_rsp_list(self,account):
+        "Get RSP list: fetch SFFs and rendered paths from ODL and return VNFFGRenderedPaths"
+
+        sff_url = self._get_rest_url(account,self._service_function_forwarders_path)
+        print(sff_url)
+        r=requests.get(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+        r.raise_for_status()
+        sff_list = r.json()
+
+        url = self._get_rest_url(account,self._get_rsp_paths)
+        print(url)
+        r = requests.get(url,auth=(account.odl.username,account.odl.password)) 
+        r.raise_for_status()
+        print(r.json())
+        # Join both responses into the protobuf representation
+        return self._fill_rsp_list(r.json(),sff_list)
+
+    def create_sfc_classifier(self, account, sfc_classifiers):
+        "Create SFC Classifiers: SFP metadata first, then ACL rules, then the classifier itself"
+        self._create_sfp_metadata(account,sfc_classifiers)
+        self._add_acl_rules(account, sfc_classifiers)
+        self._create_sf_classifier(account, sfc_classifiers)
+        return sfc_classifiers.name
+
+    def terminate_sfc_classifier(self, account, sfc_classifier_name):
+        "Terminate SFC Classifiers: remove SFP metadata, the classifiers, then the ACL rules"
+        self.delete_all_sfp_metadata(account)
+        self._terminate_sf_classifier(account, sfc_classifier_name)
+        self._del_acl_rules(account, sfc_classifier_name)
+
+    def _del_acl_rules(self,account,sfc_classifier_name):
+        " Delete ACL rules"
+        # NOTE(review): collection-level DELETE removes ALL access lists, not
+        # only those belonging to sfc_classifier_name -- confirm intended.
+        acl_url = self._get_rest_url(account,self._access_lists_path)
+        print(acl_url)
+        r=requests.delete(acl_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def _terminate_sf_classifier(self,account,sfc_classifier_name):
+        " Terminate SF classifiers"
+        # NOTE(review): collection-level DELETE removes ALL classifiers, not
+        # only sfc_classifier_name -- confirm intended.
+        sfcl_url = self._get_rest_url(account,self._service_function_classifiers_path)
+        print(sfcl_url)
+        r=requests.delete(sfcl_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def _create_sf_classifier(self,account,sfc_classifiers):
+        """
+        Create SF classifiers.
+
+        The SFF the classifier attaches to is resolved in priority order:
+        1. explicit sff_name on the classifier,
+        2. topology discovery via the classifier's port_id/vm_id,
+        3. the first hop's SFF of the classifier's rendered service path.
+        """
+        sf_classifier_json = {}
+        sf_classifier_dict = {}
+        sf_classifier_dict['name'] = sfc_classifiers.name
+        # ACL and classifier share the same name (see _add_acl_rules)
+        sf_classifier_dict['access-list'] = sfc_classifiers.name
+        sf_classifier_dict['scl-service-function-forwarder'] = list()
+        scl_sff = {}
+        scl_sff_name = ''
+
+        if sfc_classifiers.has_field('sff_name') and sfc_classifiers.sff_name is not None:
+            # 1. SFF explicitly configured on the classifier
+            scl_sff_name = sfc_classifiers.sff_name
+        elif  sfc_classifiers.has_field('port_id') and sfc_classifiers.has_field('vm_id'):
+            # 2. Discover the SFF by locating the classifier's port in the
+            #    network topology (temporary single-entry sf_dp_list)
+            sf_dp = SfDpLocator(sfc_classifiers.port_id, sfc_classifiers.port_id,'', sfc_classifiers.vm_id)
+            sf_dp_list= {}
+            sf_dp_list[sfc_classifiers.port_id] = sf_dp
+            self._get_sff_list_for_chain(account,sf_dp_list)
+
+            if sf_dp.sff_name is None:
+                logger.error("SFF not found for port %s, VM: %s",sfc_classifiers.port_id,sfc_classifiers.vm_id) 
+            else:
+                logger.info("SFF with name %s  found for port %s, VM: %s",sf_dp.sff_name, sfc_classifiers.port_id,sfc_classifiers.vm_id) 
+                scl_sff_name = sf_dp.sff_name
+        else:
+            # 3. Fall back to the first hop's SFF from the rendered service path
+            rsp_url = self._get_rest_url(account,self._get_rsp_path.format(sfc_classifiers.rsp_name))
+            r = requests.get(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+            if r.status_code == 200:
+                rsp_data = r.json()
+                if 'rendered-service-path' in rsp_data and len(rsp_data['rendered-service-path'][0]['rendered-service-path-hop']) > 0:
+                    scl_sff_name = rsp_data['rendered-service-path'][0]['rendered-service-path-hop'][0]['service-function-forwarder']
+        
+        # NOTE(review): if all three strategies fail, scl_sff_name stays '' and an
+        # SFF entry with an empty name is still PUT to ODL -- confirm acceptable.
+        logger.debug("SFF for classifer %s found is %s",sfc_classifiers.name, scl_sff_name)        
+        scl_sff['name'] = scl_sff_name
+        #scl_sff['interface'] = sff_intf_name
+        sf_classifier_dict['scl-service-function-forwarder'].append(scl_sff)
+
+        sf_classifier_json['service-function-classifier'] = sf_classifier_dict
+
+        sfcl_data = json.dumps(sf_classifier_json)
+        sfcl_url = self._get_rest_url(account,self._service_function_classifier_path.format(sfc_classifiers.name))
+        print(sfcl_url)
+        print(sfcl_data)
+        r=requests.put(sfcl_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfcl_data)
+        r.raise_for_status()
+
+    def _add_acl_rules(self, account,sfc_classifiers):
+        "Create ACL rules (ietf-access-control-list) from the classifier's match attributes"
+        access_list_json = {}
+        access_list_dict = {}
+        acl_entry_list = list()
+        acl_list_dict = {}
+        for acl_rule in sfc_classifiers.match_attributes:
+            acl_entry = {} 
+            acl_entry['rule-name']  = acl_rule.name
+            acl_entry['actions'] = {}
+            # Action: steer matched traffic into the classifier's rendered service path
+            #acl_entry['actions']['netvirt-sfc-acl:rsp-name'] = sfc_classifiers.rsp_name
+            acl_entry['actions']['service-function-acl:rendered-service-path'] = sfc_classifiers.rsp_name
+
+            # Map RIFT match-attribute fields onto IETF ACL match leaves;
+            # unrecognized fields are silently ignored
+            matches = {}
+            for field, value in acl_rule.as_dict().items():
+                if field == 'ip_proto':
+                    matches['protocol'] = value
+                elif field == 'source_ip_address':
+                    matches['source-ipv4-network'] = value
+                elif field == 'destination_ip_address':
+                    matches['destination-ipv4-network'] = value
+                elif field == 'source_port':
+                    # Single port expressed as a degenerate range
+                    matches['source-port-range'] = {'lower-port':value, 'upper-port':value}
+                elif field == 'destination_port':
+                    matches['destination-port-range'] = {'lower-port':value, 'upper-port':value}
+            acl_entry['matches'] = matches
+            acl_entry_list.append(acl_entry)    
+        acl_list_dict['ace'] = acl_entry_list 
+        # ACL is named after the classifier so _create_sf_classifier can reference it
+        access_list_dict['acl-name'] = sfc_classifiers.name
+        access_list_dict['access-list-entries'] = acl_list_dict
+        access_list_json['acl'] = access_list_dict
+
+        acl_data = json.dumps(access_list_json)
+        acl_url = self._get_rest_url(account,self._access_list_path.format(sfc_classifiers.name))
+        print(acl_url)
+        print(acl_data)
+        r=requests.put(acl_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=acl_data)
+        r.raise_for_status()
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt
new file mode 100644
index 0000000..90e06b4
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# rift_plugin provides rift_install_python_plugin()
+include(rift_plugin)
+
+# Install the rwsdn_sim Python plugin (simulated SDN backend)
+rift_install_python_plugin(rwsdn_sim rwsdn_sim.py)
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py b/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py
new file mode 100644
index 0000000..3061265
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py
@@ -0,0 +1,113 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import itertools
+import logging
+import os
+import uuid
+import time
+
+import ipaddress
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import (
+    GObject,
+    RwSdn, # Vala package
+    RwTypes,
+    RwsdnYang,
+    #IetfL2TopologyYang as l2Tl,
+    RwTopologyYang as RwTl,
+    )
+
+import rw_status
+import rwlogger
+
+from rift.topmgr.sdnsim import SdnSim
+
+
+# Module-level logger; do_init attaches an RwLogger handler to it
+logger = logging.getLogger('rwsdn.sdnsim')
+
+
+# Plugin-specific error types; both translate to NOTFOUND in the map below
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+# Exception -> RwStatus translation decorator used on the plugin entry points
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class SdnSimPlugin(GObject.Object, RwSdn.Topology):
+    """GObject plugin exposing the simulated SDN backend (SdnSim) through the RwSdn.Topology interface."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        # Backend simulator that serves topology data
+        self.sdnsim = SdnSim()
+        
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        # Attach an RwLogger handler exactly once (guard against repeated init)
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    subcategory="sdnsim",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_sdn_creds(self, account):
+        """
+        Validates the sdn account credentials for the specified account.
+
+        NOTE(review): this simulator implementation performs no real
+        validation -- it unconditionally reports success.
+
+        @param account - a SDN account
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwsdnYang.SdnConnectionStatus()
+        print("SDN Successfully connected")
+        status.status = "success"
+        status.details = "Connection was successful"
+        #logger.debug('Done with validate SDN creds: %s', type(status))
+        return status
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network_list(self, account):
+        """
+        Returns the list of discovered networks, as provided by the SdnSim backend
+
+        @param account - a SDN account
+
+        """
+        logger.debug('Get network list: ')
+        nwtop = self.sdnsim.get_network_list( account)
+        logger.debug('Done with get network list: %s', type(nwtop))
+        return nwtop
diff --git a/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt
new file mode 100644
index 0000000..00cde0b
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt
@@ -0,0 +1,37 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# 
+
+##
+# Parse the yang files
+##
+
+include(rift_yang)
+
+set(source_yang_files rwsdn.yang)
+
+# Generate the rwsdn yang bindings and link them against the generated
+# libraries of the modules rwsdn.yang imports.
+# NOTE(review): COMPONENT uses PKG_LONG_NAME inherited from a parent
+# CMakeLists.txt -- this file is not standalone.
+rift_add_yang_target(
+  TARGET rwsdn_yang
+  YANG_FILES ${source_yang_files}
+  COMPONENT ${PKG_LONG_NAME}
+  LIBRARIES
+    rwschema_yang_gen
+    rwyang
+    rwlog
+    rwlog-mgmt_yang_gen
+    mano-types_yang_gen
+)
+
diff --git a/rwlaunchpad/plugins/rwvns/yang/Makefile b/rwlaunchpad/plugins/rwvns/yang/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/yang/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upwards(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+# (the while loop runs in a single shell invocation, so each `cd ..` persists
+# into the next iteration, walking upward toward /)
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+# NOTE(review): if no Makefile.top exists anywhere above, this include of an
+# empty name fails -- presumably always present in the RIFT tree; confirm.
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang b/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang
new file mode 100644
index 0000000..5ea2eb0
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang
@@ -0,0 +1,402 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rwsdn
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rwsdn";
+  prefix "rwsdn";
+
+  import rw-base {
+    prefix rwbase;
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-log {
+    prefix "rwlog";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+
+  revision 2014-12-30 {
+    description
+        "Initial revision.";
+    reference
+        "RIFT RWSDN cloud data";
+  }
+
+  typedef sdn-connection-status-enum {
+    description "Connection status for the sdn account";
+    type enumeration {
+      enum unknown;
+      enum validating;
+      enum success;
+      enum failure;
+    }
+  }
+
+  grouping connection-status {
+    container connection-status {
+      config false;
+      rwpb:msg-new SdnConnectionStatus;
+      leaf status {
+        type sdn-connection-status-enum;
+      }
+      leaf details {
+        type string;
+      }
+    }
+  }
+
+  uses connection-status;
+
+  typedef sdn-account-type {
+    description "SDN account type";
+    type enumeration {
+      enum odl;
+      enum mock;
+      enum sdnsim;
+    }
+  }
+
+  grouping sdn-provider-auth {
+    leaf account-type {
+      type sdn-account-type;
+    }
+
+    choice provider-specific-info {
+      container odl {
+        leaf username {
+          type string {
+            length "1..255";
+          }
+        }
+
+        leaf password {
+          type string {
+            length "1..32";
+          }
+        }
+
+        leaf url {
+          type string {
+            length "1..255";
+          }
+        }
+        leaf plugin-name {
+          type string;
+          default "rwsdn_odl";
+        }
+      }
+      container mock {
+        leaf username {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwsdn_mock";
+        }
+      }
+
+      container sdnsim {
+        leaf username {
+          type string;
+        }
+        leaf topology-source {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwsdn_sim";
+        }
+      }
+    }
+  }
+
+  container sdn-accounts {
+    list sdn-account-list {
+      rwpb:msg-new SDNAccount;
+      key "name";
+
+      leaf name {
+        type string;
+      }
+
+      uses sdn-provider-auth;
+      uses connection-status;
+    }
+  }
+
+  container vnffgs {
+    list vnffg-chain {
+      key "name";
+      rwpb:msg-new VNFFGChain;
+
+      leaf name {
+        type string;
+      }
+
+      list vnf-chain-path {
+        key "order";
+        leaf order {
+          type uint32;
+          description "Order of the VNF in the VNFFG chain";
+        }
+        leaf service-function-type {
+          type string;
+        }
+        leaf nsh-aware {
+          type boolean;
+        }
+        leaf transport-type {
+          type string;
+        }
+        list vnfr-ids {
+          key "vnfr-id";
+          leaf vnfr-id {
+            type yang:uuid;
+          }
+          leaf vnfr-name {
+            type string;
+          }
+          leaf mgmt-address {
+            type inet:ip-address;
+          }
+          leaf mgmt-port {
+              type inet:port-number;
+          }
+          list vdu-list {
+            key "vm-id port-id";
+            leaf port-id {
+              rwpb:field-inline "true";
+              rwpb:field-string-max 64;
+              type string;
+            }
+            leaf vm-id {
+              rwpb:field-inline "true";
+              rwpb:field-string-max 64;
+              type string;
+            }
+            leaf name {
+              type string;
+            }
+            leaf address {
+              type inet:ip-address;
+            }
+            leaf port {
+              type inet:port-number;
+            }
+          }
+          leaf sff-name {
+            description "SFF name useful for non OVS based SFF";
+            type string;
+          } 
+        }
+      }
+      list sff {
+        rwpb:msg-new VNFFGSff;
+        key "name"; 
+        leaf name {
+          type string;
+        }
+        leaf function-type {
+          type string;
+        }
+        leaf mgmt-address {
+          type inet:ip-address;
+        }
+        leaf mgmt-port {
+          type inet:port-number;
+        }
+        list dp-endpoints {
+          key "name";
+          leaf name {
+           type string;
+          } 
+          leaf address {
+            type inet:ip-address;
+          }
+          leaf port {
+            type inet:port-number;
+          }
+        }
+        list vnfr-list {
+          key "vnfr-name";
+          leaf vnfr-name {
+            type string;
+          }
+        }
+      }
+      leaf classifier-name {
+        type string;
+      }
+    }
+  }
+
+  container vnffg-rendered-paths {
+    rwpb:msg-new VNFFGRenderedPaths;
+    list vnffg-rendered-path {
+      key "name";
+      rwpb:msg-new VNFFGRenderedPath;
+      config false;
+      leaf name {
+        type string;
+      }
+      leaf path-id {
+          description
+              "Unique Identifier for the service path";
+        type uint32;
+      }
+      list rendered-path-hop {
+        key "hop-number";
+        leaf hop-number {
+          type uint8;
+        }
+        leaf service-index {
+            description
+                "Location within the service path";
+          type uint8;
+        }
+        leaf vnfr-name {
+          type string;
+        }
+        container service-function-forwarder {
+          leaf name { 
+            description
+                "Service Function Forwarder name";
+            type string;
+          }
+          leaf ip-address {
+            description
+                "Service Function Forwarder Data Plane IP address";
+            type inet:ip-address;
+          }  
+          leaf port {
+            description
+                "Service Function Forwarder Data Plane port";
+            type inet:port-number;
+          }  
+        }
+      }
+    }
+  }
+
+
+  container vnffg-classifiers {
+    list vnffg-classifier {
+      key "name";
+      rwpb:msg-new VNFFGClassifier;
+
+      leaf name {
+        type string;
+      }
+      leaf rsp-name {
+        type string;
+      }
+      leaf port-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+      leaf vm-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+      leaf sff-name {
+        type string;
+      }
+      container vnffg-metadata {
+        leaf ctx1 {
+          type string;
+        }
+        leaf ctx2 {
+          type string;
+        }
+        leaf ctx3 {
+          type string;
+        }
+        leaf ctx4 {
+          type string;
+        }
+      }
+      list match-attributes {
+        description
+            "List of match attributes.";
+        key "name";
+        leaf name {
+          description
+              "Name for the Access list";
+          type string;  
+        }
+
+        leaf ip-proto {
+          description
+              "IP Protocol.";
+          type uint8;
+        }
+
+        leaf source-ip-address {
+          description
+              "Source IP address.";
+          type inet:ip-prefix;
+        }
+
+        leaf destination-ip-address {
+          description
+              "Destination IP address.";
+          type inet:ip-prefix;
+        }
+
+        leaf source-port {
+          description
+              "Source port number.";
+          type inet:port-number;
+        }
+
+        leaf destination-port {
+          description
+              "Destination port number.";
+          type inet:port-number;
+        }
+      } //match-attributes
+    }
+  }
+
+}
+
+/* vim: set ts=2:sw=2: */
diff --git a/rwlaunchpad/plugins/vala/CMakeLists.txt b/rwlaunchpad/plugins/vala/CMakeLists.txt
new file mode 100644
index 0000000..c9c20d7
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/CMakeLists.txt
@@ -0,0 +1,25 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 10/31/2015
+# 
+
+set(subdirs
+  rwve_vnfm_em
+  rwve_vnfm_vnf
+  rwos_ma_nfvo
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/vala/Makefile b/rwlaunchpad/plugins/vala/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt
new file mode 100644
index 0000000..b0919bd
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt
@@ -0,0 +1,64 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 10/31/2015
+# 
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwos_ma_nfvo)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwOsMaNfvo-${VALA_VERSION})
+
+rift_add_vala(
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES 
+    rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0
+    rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0
+
+  #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  #DEPENDS rwcal_yang rwlog_gi rwschema_yang
+  )
+
+rift_install_vala_artifacts(
+  HEADER_FILES ${VALA_NAME}.h
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+  )
+
+
+set(subdirs
+  rwos_ma_nfvo_rest
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala
new file mode 100644
index 0000000..63e4601
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala
@@ -0,0 +1,16 @@
+namespace RwOsMaNfvo {
+
+  public interface Orchestrator: GLib.Object {
+    /*
+     * Init routine
+     */
+    public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx);
+
+    /*
+     * Notify the NFVO of an NS lifecycle event
+     */
+    public abstract RwTypes.RwStatus ns_lifecycle_event();
+  }
+}
+
+
diff --git a/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt
new file mode 100644
index 0000000..f9ec32f
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwos_ma_nfvo_rest rwos_ma_nfvo_rest.py)
diff --git a/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py
new file mode 100644
index 0000000..3ac9429
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py
@@ -0,0 +1,51 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import logging
+import rw_status
+import rwlogger
+import subprocess, os
+
+import gi
+gi.require_version('RwOsMaNfvo', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    GObject,
+    RwOsMaNfvo,
+    RwTypes)
+
+logger = logging.getLogger('rwos-ma-nfvo-rest')
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND,
+                                             KeyError: RwTypes.RwStatus.NOTFOUND,
+                                             NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,})
+
+class RwOsMaNfvoRestPlugin(GObject.Object, RwOsMaNfvo.Orchestrator):
+    """This class implements the Os-Ma-Nfvo VALA methods."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(rwlogger.RwLogger(subcategory="rwos-ma-nfvo-rest",
+                                                log_hdl=rwlog_ctx,))
+        
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt b/rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt
new file mode 100644
index 0000000..12ff14c
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt
@@ -0,0 +1,64 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 10/31/2015
+# 
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwve_vnfm_em)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwVeVnfmEm-${VALA_VERSION})
+
+rift_add_vala(
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES 
+    rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0
+    rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0
+
+  #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  #DEPENDS rwcal_yang rwlog_gi rwschema_yang
+  )
+
+rift_install_vala_artifacts(
+  HEADER_FILES ${VALA_NAME}.h
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+  )
+
+
+set(subdirs
+  rwve_vnfm_em_rest
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala b/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala
new file mode 100644
index 0000000..3da25f9
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala
@@ -0,0 +1,16 @@
+namespace RwVeVnfmEm {
+
+  public interface ElementManager: GLib.Object {
+    /*
+     * Init routine
+     */
+    public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx);
+
+    /*
+     * Notify the EM of lifecycle event
+     */
+    public abstract RwTypes.RwStatus vnf_lifecycle_event();
+  }
+}
+
+
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt b/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt
new file mode 100644
index 0000000..6efbd40
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwve_vnfm_em_rest rwve_vnfm_em_rest.py)
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile b/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py b/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py
new file mode 100644
index 0000000..c7147a4
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py
@@ -0,0 +1,54 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import logging
+import rw_status
+import rwlogger
+import subprocess, os
+
+import gi
+gi.require_version('RwVeVnfmEm', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    GObject,
+    RwVeVnfmEm,
+    RwTypes)
+
+logger = logging.getLogger('rw_ve_vnfm_em.rest')
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND,
+                                             KeyError: RwTypes.RwStatus.NOTFOUND,
+                                             NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,})
+
+class RwVeVnfmEmRestPlugin(GObject.Object, RwVeVnfmEm.ElementManager):
+    """This class implements the Ve-Vnfm VALA methods."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(rwlogger.RwLogger(subcategory="rwve-vnfm-em-rest",
+                                                log_hdl=rwlog_ctx,))
+    @rwstatus
+    def do_vnf_lifecycle_event(self):
+        pass
+        
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt
new file mode 100644
index 0000000..190763d
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt
@@ -0,0 +1,64 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 10/31/2015
+# 
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwve_vnfm_vnf)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwVeVnfmVnf-${VALA_VERSION})
+
+rift_add_vala(
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES 
+    rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0
+    rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0
+
+  #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  #DEPENDS rwcal_yang rwlog_gi rwschema_yang
+  )
+
+rift_install_vala_artifacts(
+  HEADER_FILES ${VALA_NAME}.h
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+  )
+
+
+set(subdirs
+  rwve_vnfm_vnf_rest
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala
new file mode 100644
index 0000000..6b5e84e
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala
@@ -0,0 +1,16 @@
+namespace RwVeVnfmVnf {
+
+  public interface Vnf: GLib.Object {
+    /*
+     * Init routine
+     */
+    public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx);
+
+    /*
+     * Get the VNF monitoring parameters
+     */
+    public abstract RwTypes.RwStatus get_monitoring_param();
+  }
+}
+
+
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt
new file mode 100644
index 0000000..e890eaa
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwve_vnfm_vnf_rest rwve_vnfm_vnf_rest.py)
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py
new file mode 100644
index 0000000..dad3321
--- /dev/null
+++ b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py
@@ -0,0 +1,54 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import logging
+import rw_status
+import rwlogger
+import subprocess, os
+
+import gi
+gi.require_version('RwVeVnfmVnf', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    GObject,
+    RwVeVnfmVnf,
+    RwTypes)
+
+logger = logging.getLogger('rwve-vnfm-vnf-rest')
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND,
+                                             KeyError: RwTypes.RwStatus.NOTFOUND,
+                                             NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,})
+
+class RwVeVnfmVnfRestPlugin(GObject.Object, RwVeVnfmVnf.Vnf):
+    """This class implements the Ve-Vnfm VALA methods."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(rwlogger.RwLogger(subcategory="rwve-vnfm-vnf-rest",
+                                                log_hdl=rwlog_ctx,))
+
+    @rwstatus
+    def do_get_monitoring_param(self):
+        pass
+        
diff --git a/rwlaunchpad/plugins/yang/CMakeLists.txt b/rwlaunchpad/plugins/yang/CMakeLists.txt
new file mode 100644
index 0000000..aa5846a
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/CMakeLists.txt
@@ -0,0 +1,79 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tom Seidenberg
+# Creation Date: 2014/04/08
+# 
+
+set(source_yang_files
+  rw-launchpad.yang
+  rw-monitor.yang
+  rw-nsm.yang
+  rw-resource-mgr.yang
+  rw-vnfm.yang
+  rw-vns.yang
+  rw-image-mgmt.yang
+  )
+
+##
+# Yang targets
+##
+rift_generate_python_log_yang(
+    LOG_CATEGORY_NAME rw-mano-log
+    START_EVENT_ID 65000
+    OUT_YANG_FILE_VAR rw_mano_log_file
+    )
+
+rift_generate_python_log_yang(
+    LOG_CATEGORY_NAME rw-monitor-log
+    START_EVENT_ID 64000
+    OUT_YANG_FILE_VAR rw_monitor_log_file
+    )
+
+rift_generate_python_log_yang(
+    LOG_CATEGORY_NAME rw-mon-params-log
+    START_EVENT_ID 67000
+    OUT_YANG_FILE_VAR rw_mon_params_log_file
+    )
+
+rift_generate_python_log_yang(
+    LOG_CATEGORY_NAME rw-resource-mgr-log
+    START_EVENT_ID 69000
+    OUT_YANG_FILE_VAR rw_resource_mgr_log_file
+    )
+
+rift_add_yang_target(
+  TARGET rwlaunchpad_yang
+  YANG_FILES
+    ${source_yang_files}
+    ${rw_mano_log_file}
+    ${rw_monitor_log_file}
+    ${rw_mon_params_log_file}
+    ${rw_resource_mgr_log_file}
+  COMPONENT ${PKG_LONG_NAME}
+  LIBRARIES
+    mano_yang_gen
+    rwcloud_yang_gen
+    rw_conman_yang_gen
+    rwconfig_agent_yang_gen
+    mano-types_yang_gen
+  DEPENDS
+    mano_yang
+    rwcloud_yang
+    rw_conman_yang
+    rwconfig_agent_yang
+    mano-types_yang
+)
+
diff --git a/rwlaunchpad/plugins/yang/Makefile b/rwlaunchpad/plugins/yang/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/yang/rw-image-mgmt.tailf.yang b/rwlaunchpad/plugins/yang/rw-image-mgmt.tailf.yang
new file mode 100644
index 0000000..0184a9a
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/rw-image-mgmt.tailf.yang
@@ -0,0 +1,45 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-image-mgmt-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-image-mgmt-annotation";
+  prefix "rw-image-mgmt-ann";
+
+  import rw-image-mgmt {
+    prefix rw-image-mgmt;
+  }
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  tailf:annotate "/rw-image-mgmt:upload-jobs" {
+    tailf:callpoint rw_callpoint;
+  }
+
+  tailf:annotate "/rw-image-mgmt:create-upload-job" {
+    tailf:actionpoint rw_actionpoint;
+  }
+
+  tailf:annotate "/rw-image-mgmt:cancel-upload-job" {
+    tailf:actionpoint rw_actionpoint;
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-image-mgmt.yang b/rwlaunchpad/plugins/yang/rw-image-mgmt.yang
new file mode 100644
index 0000000..833931f
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/rw-image-mgmt.yang
@@ -0,0 +1,255 @@
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+/**
+ * @file rw-image-mgmt.yang
+ * @author Austin Cormier
+ * @date 2016/06/01
+ * @brief Image Management Yang
+ */
+
+module rw-image-mgmt
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-image-mgmt";
+  prefix "rw-image-mgmt";
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import rw-cloud {
+    prefix "rwcloud";
+  }
+
+  import rwcal {
+    prefix "rwcal";
+  }
+
+  revision 2016-06-01 {
+    description
+      "Initial revision.";
+  }
+
+  typedef job-status {
+    type enumeration {
+      enum QUEUED;
+      enum IN_PROGRESS;
+      enum CANCELLING;
+      enum CANCELLED;
+      enum COMPLETED;
+      enum FAILED;
+    }
+  }
+
+  typedef upload-task-status {
+    type enumeration {
+      enum QUEUED;
+      enum CHECK_IMAGE_EXISTS;
+      enum UPLOADING;
+      enum CANCELLING;
+      enum CANCELLED;
+      enum COMPLETED;
+      enum FAILED;
+    }
+  }
+
+  grouping image-upload-info {
+    leaf image-id {
+      description "The image id that exists in the image catalog";
+      type string;
+    }
+
+    leaf image-name {
+      description "The image name that exists in the image catalog";
+      type string;
+    }
+
+    leaf image-checksum {
+      description "The image md5 checksum";
+      type string;
+    }
+  }
+
+  grouping upload-task-status {
+    leaf status {
+      description "The status of the upload task";
+      type upload-task-status;
+      default QUEUED;
+    }
+
+    leaf detail {
+      description "Detailed upload status message";
+      type string;
+    }
+
+    leaf progress-percent {
+      description "The image upload progress percentage (0-100)";
+      type uint8;
+      default 0;
+    }
+
+    leaf bytes_written {
+      description "The number of bytes written";
+      type uint64;
+      default 0;
+    }
+
+    leaf bytes_total {
+      description "The total number of bytes to write";
+      type uint64;
+      default 0;
+    }
+
+    leaf bytes_per_second {
+      description "The total number of bytes written per second";
+      type uint32;
+      default 0;
+    }
+
+    leaf start-time {
+      description "The image upload start time (unix epoch)";
+      type uint32;
+    }
+
+    leaf stop-time {
+      description "The image upload stop time (unix epoch)";
+      type uint32;
+    }
+  }
+
+  grouping upload-task {
+    leaf cloud-account {
+      description "The cloud account to upload the image to";
+      type leafref {
+        path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+      }
+    }
+
+    uses image-upload-info;
+    uses upload-task-status;
+  }
+
+  container upload-jobs {
+    rwpb:msg-new UploadJobs;
+    description "Image upload jobs";
+    config false;
+
+    list job {
+      rwpb:msg-new UploadJob;
+      key "id";
+
+      leaf id {
+        description "Unique image upload job-id";
+        type uint32;
+      }
+
+      leaf status {
+        description "Current job status";
+        type job-status;
+      }
+
+      leaf start-time {
+        description "The job start time (unix epoch)";
+        type uint32;
+      }
+
+      leaf stop-time {
+        description "The job stop time (unix epoch)";
+        type uint32;
+      }
+
+      list upload-tasks {
+        rwpb:msg-new UploadTask;
+        description "The upload tasks that are part of this job";
+        uses upload-task;
+      }
+    }
+  }
+
+  rpc create-upload-job {
+    input {
+      rwpb:msg-new CreateUploadJob;
+
+      choice image-selection {
+        case onboarded-image {
+          description "Use an image previously onboarded in the image catalog";
+          container onboarded-image {
+            uses image-upload-info;
+          }
+        }
+
+        case external-url {
+          description "Use an HTTP URL to pull the image from";
+
+          container external-url {
+            leaf image-url {
+              description "The image HTTP URL to pull the image from";
+              type string;
+            }
+
+            uses image-upload-info;
+
+            leaf disk-format {
+              description "Format of the Disk";
+              type rwcal:disk-format;
+            }
+
+            leaf container-format {
+              description "Format of the container";
+              type rwcal:container-format;
+              default "bare";
+            }
+          }
+        }
+      }
+
+      leaf-list cloud-account {
+        description "List of cloud accounts to upload the image to";
+        type leafref {
+          path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+        }
+      }
+    }
+
+    output {
+      rwpb:msg-new CreateUploadJobOutput;
+      leaf job-id {
+        description "The job-id assigned to the newly created upload job";
+        type uint32;
+      }
+    }
+  }
+
+  rpc cancel-upload-job {
+    input {
+      rwpb:msg-new CancelUploadJob;
+      leaf job-id {
+        type uint32;
+      }
+    }
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang b/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang
new file mode 100644
index 0000000..1fab791
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang
@@ -0,0 +1,37 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-launchpad-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-launchpad-annotation";
+  prefix "rw-launchpad-ann";
+
+  import rw-launchpad {
+    prefix rw-launchpad;
+  }
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  tailf:annotate "/rw-launchpad:datacenters" {
+    tailf:callpoint rw_callpoint;
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-launchpad.yang b/rwlaunchpad/plugins/yang/rw-launchpad.yang
new file mode 100644
index 0000000..37a9c85
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/rw-launchpad.yang
@@ -0,0 +1,207 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+
+
+/**
+ * @file rw-launchpad.yang
+ * @author Joshua Downer
+ * @date 2015/09/14
+ * @brief Launchpad Yang
+ */
+
+module rw-launchpad
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-launchpad";
+  prefix "rw-launchpad";
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rwcal {
+    prefix "rwcal";
+  }
+
+  import rw-vnfd {
+    prefix "rw-vnfd";
+  }
+
+  import vld {
+    prefix "vld";
+  }
+
+  import rw-nsd {
+    prefix "rw-nsd";
+  }
+
+  import rw-cloud {
+    prefix "rw-cloud";
+  }
+
+  import rw-nsr {
+    prefix "rw-nsr";
+  }
+
+  import rw-conman {
+    prefix "rw-conman";
+  }
+
+  import rw-config-agent {
+    prefix "rw-config-agent";
+  }
+
+  import rw-monitor {
+    prefix "rw-monitor";
+  }
+
+  import rw-image-mgmt {
+    prefix "rw-image-mgmt";
+  }
+
+  revision 2015-09-14 {
+    description
+      "Initial revision.";
+  }
+
+  container datacenters {
+    description "OpenMano data centers";
+
+    rwpb:msg-new DataCenters;
+    config false;
+
+    list cloud-accounts {
+      description
+          "A list of OpenMano cloud accounts that have data centers associated
+          with them";
+
+      rwpb:msg-new CloudAccount;
+      key "name";
+
+      leaf name {
+        description "The name of the cloud account";
+        type leafref {
+          path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+        }
+      }
+
+      list datacenters {
+        rwpb:msg-new DataCenter;
+        leaf uuid {
+          description "The UUID of the data center";
+          type yang:uuid;
+        }
+
+        leaf name {
+          description "The name of the data center";
+          type string;
+        }
+      }
+    }
+  }
+
+  typedef resource-orchestrator-account-type {
+    description "RO account type";
+    type enumeration {
+      enum rift-ro;
+      enum openmano;
+    }
+  }
+
+  container resource-orchestrator {
+    rwpb:msg-new ResourceOrchestrator;
+
+    leaf name {
+       type string;
+    }
+
+    leaf account-type {
+      type resource-orchestrator-account-type;
+    }
+
+    choice resource-orchestrator {
+      description
+        "The resource orchestrator to use by the Launchpad";
+      default rift-ro;
+
+      case rift-ro {
+        description
+          "Use the RIFT.io resource orchestrator";
+
+        container rift-ro {
+          leaf rift-ro {
+            type empty;
+          }
+        }
+      }
+
+      case openmano {
+        description
+          "Use OpenMano as RO";
+
+        container openmano {
+          leaf host {
+            type string;
+            default "localhost";
+          }
+
+          leaf port {
+            type uint16;
+            default 9090;
+          }
+
+          leaf tenant-id {
+            type string {
+              length "36";
+            }
+            mandatory true;
+          }
+        }
+      }
+    }
+  }
+
+  container launchpad-config {
+    leaf public-ip {
+      description
+          "An IP address that can, at least, be reached by the host that the
+          launchpad is running on. This is not a mandatory but is required for
+          alarms to function correctly.";
+      type string;
+    }
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-monitor.yang b/rwlaunchpad/plugins/yang/rw-monitor.yang
new file mode 100644
index 0000000..559880d
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/rw-monitor.yang
@@ -0,0 +1,70 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+/**
+ * @file rw-monitor.yang
+ * @author Joshua Downer
+ * @date 2015/10/30
+ * @brief NFVI Monitor
+ */
+
+module rw-monitor
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-monitor";
+  prefix "rw-monitor";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-cloud {
+    prefix "rw-cloud";
+  }
+
+  import rw-nsr {
+    prefix "rw-nsr";
+  }
+
+  import rw-vnfr {
+    prefix "rw-vnfr";
+  }
+
+  import rwcal {
+    prefix "rwcal";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  revision 2015-10-30 {
+    description
+      "Initial revision.";
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-nsm.yang b/rwlaunchpad/plugins/yang/rw-nsm.yang
new file mode 100644
index 0000000..4e6d9aa
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/rw-nsm.yang
@@ -0,0 +1,133 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+/**
+ * @file rw-nsm.yang
+ * @author Rajesh Velandy
+ * @date 2015/10/07
+ * @brief NSM  yang
+ */
+
+module rw-nsm
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-nsm";
+  prefix "rw-nsm";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import rw-nsd {
+    prefix "rw-nsd";
+  }
+  import nsd {
+    prefix "nsd";
+  }
+  import rw-nsr {
+    prefix "rw-nsr";
+  }
+  import vld {
+    prefix "vld";
+  }
+  import rw-vlr {
+    prefix "rw-vlr";
+  }
+  import rw-vns {
+    prefix "rw-vns";
+  }
+  import rw-vnfd {
+    prefix "rw-vnfd";
+  }
+  import vnfd {
+    prefix "vnfd";
+  }
+  import rw-vnfr {
+    prefix "rw-vnfr";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-launchpad {
+    prefix "rw-launchpad";
+  }
+
+  import rw-cloud {
+    prefix "rw-cloud";
+  }
+
+  import rw-sdn {
+    prefix "rw-sdn";
+  }
+
+  import rw-config-agent {
+    prefix "rw-config-agent";
+  }
+
+  revision 2015-10-07 {
+    description
+      "Initial revision.";
+  }
+
+  grouping cm-endpoint {
+    leaf cm-ip-address {
+      type inet:ip-address;
+      description "IP Address";
+      default "127.0.0.1";
+    }
+    leaf cm-port {
+      type inet:port-number;
+      description "Port Number";
+      default 2022;
+    }
+    leaf cm-username {
+      description "RO endpoint username";
+      type string;
+      default "admin";
+    }
+    leaf cm-password {
+      description "RO endpoint password";
+      type string;
+      default "admin";
+    }
+  }
+
+  container ro-config {
+    description "Resource Orchestrator configuration";
+    rwpb:msg-new "roConfig";
+    rwcli:new-mode "ro-config";
+
+    container cm-endpoint {
+      description "Service Orchestrator endpoint ip address";
+      rwpb:msg-new "SoEndpoint";
+      uses cm-endpoint;
+    }
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang b/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang
new file mode 100644
index 0000000..6b6e8b1
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang
@@ -0,0 +1,42 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-resource-mgr-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-resource-mgr-annotation";
+  prefix "rw-resource-mgr-ann";
+
+  import rw-resource-mgr
+  {
+    prefix rw-resource-mgr;
+  }
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  tailf:annotate "/rw-resource-mgr:resource-pool-records" {
+    tailf:callpoint rw_callpoint;
+  }
+
+  tailf:annotate "/rw-resource-mgr:resource-mgmt" {
+    tailf:callpoint rw_callpoint;
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-resource-mgr.yang b/rwlaunchpad/plugins/yang/rw-resource-mgr.yang
new file mode 100644
index 0000000..9bf914a
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/rw-resource-mgr.yang
@@ -0,0 +1,309 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+module rw-resource-mgr
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-resource-mgr";
+  prefix "rw-resource-mgr";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-cloud {
+    prefix "rwcloud";
+  }
+
+  import rwcal {
+    prefix "rwcal";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2015-10-16 {
+    description
+      "Initial revision.";
+  }
+
+  grouping resource-pool-info {
+    leaf name {
+      description "Name of the resource pool";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+      //mandatory true;
+    }
+
+    leaf resource-type {
+      description "Type of resource";
+      type enumeration {
+        enum compute;
+        enum network;
+      }
+    }
+
+    leaf pool-type {
+      description "Type of pool";
+      type enumeration {
+        enum static;
+        enum dynamic;
+      }
+      default "static";
+    }
+
+    leaf max-size {
+      description "Maximum size to which a dynamic resource pool can grow";
+      type uint32;
+    }
+
+  }
+
+  container resource-mgr-config {
+    description "Data model for configuration of resource-mgr";
+    rwpb:msg-new ResourceManagerConfig;
+    config true;
+
+    container management-domain {
+      leaf name {
+        description "The management domain name this launchpad is associated with.";
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+        //mandatory true;
+      }
+    }
+
+    container resource-pools {
+      description "Resource Pool configuration";
+      rwpb:msg-new ResourcePools;
+      list cloud-account {
+        key "name";
+        leaf name {
+          description
+            "Resource pool for the configured cloud account";
+          type leafref {
+            path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+          }
+        }
+      }
+    }
+  }
+
+  grouping resource-state {
+    leaf resource-state {
+      type enumeration {
+        enum inactive;
+        enum active;
+        enum pending;
+        enum failed;
+      }
+    }
+    leaf resource-errors {
+      description "Error message details in case of failed resource state";
+      type string;
+    }
+  }
+
+  container resource-mgmt {
+    description "Resource management ";
+    config false;
+
+    container vdu-event {
+      description "Events for VDU Management";
+      rwpb:msg-new VDUEvent;
+
+      list vdu-event-data {
+        rwpb:msg-new VDUEventData;
+        key "event-id";
+
+        leaf event-id {
+          description "Identifier associated with the VDU transaction";
+          type yang:uuid;
+        }
+
+        leaf cloud-account {
+          description "The cloud account to use for this resource request";
+          type leafref {
+            path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+          }
+        }
+
+        container request-info {
+          description "Information about required resource";
+
+          uses rwcal:vdu-create-params;
+        }
+
+        container resource-info {
+          description "Information about allocated resource";
+          leaf pool-name {
+            type string;
+          }
+          uses resource-state;
+          uses rwcal:vdu-info-params;
+        }
+      }
+    }
+
+    container vlink-event {
+      description "Events for Virtual Link management";
+      rwpb:msg-new VirtualLinkEvent;
+
+      list vlink-event-data {
+        rwpb:msg-new VirtualLinkEventData;
+
+        key "event-id";
+
+        leaf event-id {
+          description "Identifier associated with the Virtual Link transaction";
+          type yang:uuid;
+        }
+
+        leaf cloud-account {
+          description "The cloud account to use for this resource request";
+          type leafref {
+            path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+          }
+        }
+
+        container request-info {
+          description "Information about required resource";
+
+          uses rwcal:virtual-link-create-params;
+        }
+
+        container resource-info {
+          leaf pool-name {
+            type string;
+          }
+          uses resource-state;
+          uses rwcal:virtual-link-info-params;
+        }
+      }
+    }
+  }
+
+
+  container resource-pool-records {
+    description "Resource Pool Records";
+    rwpb:msg-new ResourcePoolRecords;
+    config false;
+
+    list cloud-account {
+      key "name";
+      leaf name {
+        description
+          "The configured cloud account's pool records.";
+        type leafref {
+          path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+        }
+      }
+
+      list records {
+        rwpb:msg-new ResourceRecordInfo;
+        key "name";
+        uses resource-pool-info;
+
+        leaf pool-status {
+          type enumeration {
+            enum unknown;
+            enum locked;
+            enum unlocked;
+          }
+        }
+
+        leaf total-resources {
+          type uint32;
+        }
+
+        leaf free-resources {
+          type uint32;
+        }
+
+        leaf allocated-resources {
+          type uint32;
+        }
+      }
+    }
+  }
+
+
+  container resource-mgr-data{
+    description "Resource Manager operational data";
+    config false;
+
+    container pool-record {
+      description "Resource Pool record";
+
+      list cloud {
+        key "name";
+        max-elements 16;
+        rwpb:msg-new "ResmgrCloudPoolRecords";
+        leaf name {
+          description
+            "The configured cloud account's pool records.";
+          type leafref {
+            path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+          }
+        }
+
+        list records {
+          key "name";
+          uses resource-pool-info;
+
+          list free-vdu-list {
+            key vdu-id;
+            uses rwcal:vdu-info-params;
+          }
+
+          list in-use-vdu-list {
+            key vdu-id;
+            uses rwcal:vdu-info-params;
+          }
+
+          list free-vlink-list {
+            key virtual-link-id;
+            uses rwcal:virtual-link-info-params;
+          }
+
+          list in-use-vlink-list {
+            key virtual-link-id;
+            uses rwcal:virtual-link-info-params;
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-vnfm.yang b/rwlaunchpad/plugins/yang/rw-vnfm.yang
new file mode 100644
index 0000000..25e1abb
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/rw-vnfm.yang
@@ -0,0 +1,78 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+/**
+ * @file rw-vnfm.yang
+ * @author Rajesh Velandy
+ * @date 2015/10/07
+ * @brief VNFM  yang
+ */
+
+module rw-vnfm
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-vnfm";
+  prefix "rw-vnfm";
+
+  import vld {
+    prefix "vld";
+  }
+
+  import vlr {
+    prefix "vlr";
+  }
+
+  import rw-vlr {
+    prefix "rw-vlr";
+  }
+
+  import rw-vns {
+    prefix "rw-vns";
+  }
+
+  import rw-vnfd {
+    prefix "rw-vnfd";
+  }
+
+  import rw-vnfr {
+    prefix "rw-vnfr";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-manifest {
+    prefix "rw-manifest";
+  }
+
+  import rw-resource-mgr {
+    prefix "rw-resource-mgr";
+  }
+
+  import rw-launchpad {
+    prefix "rw-launchpad";
+  }
+
+  revision 2015-10-07 {
+    description
+      "Initial revision.";
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-vns.yang b/rwlaunchpad/plugins/yang/rw-vns.yang
new file mode 100644
index 0000000..0036e16
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/rw-vns.yang
@@ -0,0 +1,96 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+/**
+ * @file rw-vns.yang
+ * @author Austin Cormier
+ * @date 2015/10/06
+ * @brief Virtual Network Service Yang
+ */
+
+module rw-vns
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-vns";
+  prefix "rw-vns";
+
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rwcal {
+    prefix "rwcal";
+  }
+
+  import rwsdn {
+    prefix "rwsdn";
+  }
+
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import rw-vlr {
+    prefix "rwvlr";
+  }
+
+  import vld {
+    prefix "vld";
+  }
+
+  import ietf-network {
+    prefix "nw";
+  }
+
+  import ietf-network-topology {
+    prefix "nt";
+  }
+
+  import ietf-l2-topology {
+    prefix "l2t";
+  }
+
+  import rw-topology {
+    prefix "rw-topology";
+  }
+
+  import rw-resource-mgr {
+    prefix "rw-resource-mgr";
+  }
+
+  import rw-sdn {
+    prefix "rw-sdn";
+  }
+
+  revision 2015-10-05 {
+    description
+      "Initial revision.";
+  }
+}
diff --git a/rwlaunchpad/ra/CMakeLists.txt b/rwlaunchpad/ra/CMakeLists.txt
new file mode 100644
index 0000000..cd07b92
--- /dev/null
+++ b/rwlaunchpad/ra/CMakeLists.txt
@@ -0,0 +1,117 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 09/16/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+install(
+  PROGRAMS
+    pingpong_longevity_systest
+    pingpong_vnf_systest
+    pingpong_records_systest
+    pingpong_vnf_reload_systest
+    pingpong_lp_ha_systest
+    pingpong_recovery_systest
+    pingpong_scaling_systest
+    scaling_systest
+  DESTINATION usr/rift/systemtest/pingpong_vnf
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  PROGRAMS
+    multi_vm_vnf_slb_systest.sh
+    multi_vm_vnf_trafgen_systest.sh
+  DESTINATION usr/rift/systemtest/multi_vm_vnf
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    pytest/multivm_vnf/conftest.py
+    pytest/multivm_vnf/test_multi_vm_vnf_slb.py
+    pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
+    pytest/multivm_vnf/test_trafgen_data.py
+  DESTINATION usr/rift/systemtest/pytest/multi_vm_vnf
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  PROGRAMS
+    launchpad_longevity_systest
+    launchpad_systest
+  DESTINATION usr/rift/systemtest/launchpad
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    racfg/multi_tenant_systest_openstack.racfg
+  DESTINATION usr/rift/systemtest/launchpad
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    pytest/conftest.py
+    pytest/test_launchpad.py
+    pytest/test_launchpad_longevity.py
+    pytest/test_start_standby.py
+    pytest/test_failover.py
+  DESTINATION usr/rift/systemtest/pytest/system
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    pytest/ns/conftest.py
+    pytest/ns/test_onboard.py
+  DESTINATION usr/rift/systemtest/pytest/system/ns
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    pytest/ns/pingpong/test_pingpong.py
+    pytest/ns/pingpong/test_pingpong_longevity.py
+    pytest/ns/pingpong/test_records.py
+    pytest/ns/pingpong/test_scaling.py
+  DESTINATION usr/rift/systemtest/pytest/system/ns/pingpong
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    pytest/ns/haproxy/test_scaling.py
+  DESTINATION usr/rift/systemtest/pytest/system/ns/haproxy
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    racfg/pingpong_vnf_systest_cloudsim.racfg
+    racfg/pingpong_vnf_systest_openstack.racfg
+    racfg/pingpong_scaling_systest_openstack.racfg
+    racfg/pingpong_records_systest_cloudsim.racfg
+    racfg/pingpong_records_systest_openstack.racfg
+    racfg/pingpong_records_systest_openstack_xml.racfg
+    racfg/pingpong_vnf_reload_systest_openstack.racfg
+    racfg/pingpong_vnf_reload_systest_openstack_xml.racfg
+    racfg/scaling_systest.racfg
+    racfg/recovery_systest.racfg
+    racfg/pingpong_lp_ha_systest_openstack.racfg
+  DESTINATION usr/rift/systemtest/pingpong_vnf
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    racfg/multivm_vnf_slb_systest.racfg
+    racfg/multivm_vnf_trafgen_systest.racfg
+  DESTINATION usr/rift/systemtest/multi_vm_vnf
+  COMPONENT ${PKG_LONG_NAME})
diff --git a/rwlaunchpad/ra/launchpad_longevity_systest b/rwlaunchpad/ra/launchpad_longevity_systest
new file mode 100755
index 0000000..f4370aa
--- /dev/null
+++ b/rwlaunchpad/ra/launchpad_longevity_systest
@@ -0,0 +1,56 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/01/04
+#
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+SCRIPT_TEST="py.test -x -v \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/test_launchpad_longevity.py"
+
+test_cmd=""
+repeat=10
+repeat_keyword="longevity"
+repeat_system=1
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+test_rc=0
+for i in $(seq ${repeat_system});
+do
+    echo "CYCLE: $i"
+    eval ${test_cmd}
+    test_rc=$?
+    echo "DEBUG: Got test command rc: $test_rc"
+    if [[ ${test_rc} -ne 0 ]]; then
+        echo "Exiting with test_rc: $test_rc"
+        break
+    fi
+done
+
+# unit test XML files are converted to pretty printed format
+pretty_print_junit_xml
+
+exit ${test_rc}
diff --git a/rwlaunchpad/ra/launchpad_systest b/rwlaunchpad/ra/launchpad_systest
new file mode 100755
index 0000000..54dfd6e
--- /dev/null
+++ b/rwlaunchpad/ra/launchpad_systest
@@ -0,0 +1,39 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/07/12
+#
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+SCRIPT_TEST="py.test -x -v \
+            ${PYTEST_DIR}/system/test_launchpad.py"
+
+test_cmd=""
+
+
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
+
diff --git a/rwlaunchpad/ra/multi_vm_vnf_slb_systest.sh b/rwlaunchpad/ra/multi_vm_vnf_slb_systest.sh
new file mode 100755
index 0000000..a2a1059
--- /dev/null
+++ b/rwlaunchpad/ra/multi_vm_vnf_slb_systest.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Karun Ganesharatnam
+# Creation Date: 02/26/2016
+# 
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -vvv \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/multi_vm_vnf/test_multi_vm_vnf_slb.py \
+            ${PYTEST_DIR}/multi_vm_vnf/test_trafgen_data.py"
+
+test_cmd=""
+
+# Parse command-line argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables and create the mvv image
+mvv=true
+create_mvv_image_file
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/ra/multi_vm_vnf_trafgen_systest.sh b/rwlaunchpad/ra/multi_vm_vnf_trafgen_systest.sh
new file mode 100755
index 0000000..c88b95a
--- /dev/null
+++ b/rwlaunchpad/ra/multi_vm_vnf_trafgen_systest.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Karun Ganesharatnam
+# Creation Date: 02/26/2016
+# 
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+
+SCRIPT_TEST="py.test -x -vvv \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/multi_vm_vnf/test_multi_vm_vnf_trafgen.py \
+            ${PYTEST_DIR}/multi_vm_vnf/test_trafgen_data.py"
+
+test_cmd=""
+
+# Parse command-line argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables and create the mvv image
+mvv=true
+create_mvv_image_file
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/ra/pingpong_longevity_systest b/rwlaunchpad/ra/pingpong_longevity_systest
new file mode 100755
index 0000000..7728f7f
--- /dev/null
+++ b/rwlaunchpad/ra/pingpong_longevity_systest
@@ -0,0 +1,43 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/01/04
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/test_onboard.py \
+            ${PYTEST_DIR}/system/ns/pingpong/test_records.py \
+            ${PYTEST_DIR}/system/ns/pingpong/test_pingpong_longevity.py"
+
+test_cmd=""
+repeat_keyword="longevity"
+repeat=10
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/ra/pingpong_lp_ha_systest b/rwlaunchpad/ra/pingpong_lp_ha_systest
new file mode 100755
index 0000000..5647168
--- /dev/null
+++ b/rwlaunchpad/ra/pingpong_lp_ha_systest
@@ -0,0 +1,44 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 19-Feb-2016
+# 
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+  ${PYTEST_DIR}/system/test_launchpad.py \
+  ${PYTEST_DIR}/system/ns/test_onboard.py \
+  ${PYTEST_DIR}/system/ns/pingpong/test_records.py"
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Force standalone launchpad
+lp_standalone=true
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
+
diff --git a/rwlaunchpad/ra/pingpong_records_systest b/rwlaunchpad/ra/pingpong_records_systest
new file mode 100755
index 0000000..5897714
--- /dev/null
+++ b/rwlaunchpad/ra/pingpong_records_systest
@@ -0,0 +1,41 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2015/09/15
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+restconf=true
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/test_onboard.py \
+            ${PYTEST_DIR}/system/ns/pingpong/test_records.py"
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/ra/pingpong_recovery_systest b/rwlaunchpad/ra/pingpong_recovery_systest
new file mode 100755
index 0000000..b4cd426
--- /dev/null
+++ b/rwlaunchpad/ra/pingpong_recovery_systest
@@ -0,0 +1,44 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 29-Mar-2016
+# 
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -v -p no:cacheprovider --recovery --no-update \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/test_onboard.py \
+            ${PYTEST_DIR}/system/ns/pingpong/test_records.py"
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# We want to run the test in expanded mode
+collapsed_mode=false
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
+
diff --git a/rwlaunchpad/ra/pingpong_scaling_systest b/rwlaunchpad/ra/pingpong_scaling_systest
new file mode 100755
index 0000000..eca3ee6
--- /dev/null
+++ b/rwlaunchpad/ra/pingpong_scaling_systest
@@ -0,0 +1,50 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2015/07/13
+#
+
+if [ -z $AUTO_TASK_ID ]; then
+    AUTO_TASK_ID=1
+    export AUTO_TASK_ID
+fi
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+
+SCRIPT_TEST="py.test -x -s -p no:cacheprovider \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/pingpong/test_scaling.py"
+
+test_prefix="pingpong_scaling_systest"
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
+
+# display scaling log
+scaling_log="${RIFT_ARTIFACTS}/scaling_${AUTO_TASK_ID}.log"
+cat ${scaling_log}
diff --git a/rwlaunchpad/ra/pingpong_vnf_reload_systest b/rwlaunchpad/ra/pingpong_vnf_reload_systest
new file mode 100755
index 0000000..609b1d4
--- /dev/null
+++ b/rwlaunchpad/ra/pingpong_vnf_reload_systest
@@ -0,0 +1,45 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 2016/01/04
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider -k 'not Teardown or test_stop_launchpad' \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/test_onboard.py \
+            ${PYTEST_DIR}/system/ns/pingpong/test_records.py"
+
+REBOOT_SCRIPT_TEST="py.test -x -v -p no:cacheprovider -k 'test_wait_for_launchpad_started or test_wait_for_pingpong_configured or Teardown' \
+                    ${PYTEST_DIR}/system/test_launchpad.py \
+                    ${PYTEST_DIR}/system/ns/test_onboard.py \
+                    ${PYTEST_DIR}/system/ns/pingpong/test_records.py"
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/ra/pingpong_vnf_systest b/rwlaunchpad/ra/pingpong_vnf_systest
new file mode 100755
index 0000000..24cd303
--- /dev/null
+++ b/rwlaunchpad/ra/pingpong_vnf_systest
@@ -0,0 +1,40 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2015/09/15
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/test_onboard.py"
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/ra/pytest/conftest.py b/rwlaunchpad/ra/pytest/conftest.py
new file mode 100644
index 0000000..fc094fa
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/conftest.py
@@ -0,0 +1,131 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import pytest
+import os
+import subprocess
+import sys
+
+import rift.auto.log
+import rift.auto.session
+import rift.vcs.vcs
+import logging
+
+import gi
+gi.require_version('RwCloudYang', '1.0')
+
+from gi.repository import RwCloudYang
+
+@pytest.fixture(scope='session')
+def cloud_name_prefix():
+    '''fixture which returns the prefix used in cloud account names'''
+    return 'cloud'
+
+@pytest.fixture(scope='session')
+def cloud_account_name(cloud_name_prefix):
+    '''fixture which returns the name used to identify the cloud account'''
+    return '{prefix}-0'.format(prefix=cloud_name_prefix)
+
+@pytest.fixture(scope='session')
+def sdn_account_name():
+    '''fixture which returns the name used to identify the sdn account'''
+    return 'sdn-0'
+
+@pytest.fixture(scope='session')
+def sdn_account_type():
+    '''fixture which returns the account type used by the sdn account'''
+    return 'odl'
+
+@pytest.fixture(scope='session')
+def cloud_module():
+    '''Fixture containing the module which defines cloud account
+    Returns:
+        module to be used when configuring a cloud account
+    '''
+    return RwCloudYang
+
+@pytest.fixture(scope='session')
+def cloud_xpath():
+    '''Fixture containing the xpath that should be used to configure a cloud account
+    Returns:
+        xpath to be used when configure a cloud account
+    '''
+    return '/cloud/account'
+
+@pytest.fixture(scope='session')
+def cloud_accounts(cloud_module, cloud_name_prefix, cloud_host, cloud_user, cloud_tenants, cloud_type):
+    '''fixture which returns a list of CloudAccounts. One per tenant provided
+
+    Arguments:
+        cloud_module        - fixture: module defining cloud account
+        cloud_name_prefix   - fixture: name prefix used for cloud account
+        cloud_host          - fixture: cloud host address
+        cloud_user          - fixture: cloud account user key
+        cloud_tenants       - fixture: list of tenants to create cloud accounts on
+        cloud_type          - fixture: cloud account type
+
+    Returns:
+        A list of CloudAccounts
+    '''
+    accounts = []
+    for idx, cloud_tenant in enumerate(cloud_tenants):
+        cloud_account_name = "{prefix}-{idx}".format(prefix=cloud_name_prefix, idx=idx)
+
+        if cloud_type == 'lxc':
+            accounts.append(
+                    cloud_module.CloudAccount.from_dict({
+                        "name": cloud_account_name,
+                        "account_type": "cloudsim_proxy"})
+            )
+        elif cloud_type == 'openstack':
+            password = 'mypasswd'
+            auth_url = 'http://{cloud_host}:5000/v3/'.format(cloud_host=cloud_host)
+            mgmt_network = os.getenv('MGMT_NETWORK', 'private')
+            accounts.append(
+                    cloud_module.CloudAccount.from_dict({
+                        'name':  cloud_account_name,
+                        'account_type': 'openstack',
+                        'openstack': {
+                            'admin': True,
+                            'key': cloud_user,
+                            'secret': password,
+                            'auth_url': auth_url,
+                            'tenant': cloud_tenant,
+                            'mgmt_network': mgmt_network}})
+            )
+        elif cloud_type == 'mock':
+            accounts.append(
+                    cloud_module.CloudAccount.from_dict({
+                        "name": cloud_account_name,
+                        "account_type": "mock"})
+            )
+
+    return accounts
+
+
+@pytest.fixture(scope='session', autouse=True)
+def cloud_account(cloud_accounts):
+    '''fixture which returns an instance of CloudAccount
+
+    Arguments:
+        cloud_accounts - fixture: list of generated cloud accounts
+
+    Returns:
+        An instance of CloudAccount
+    '''
+    return cloud_accounts[0]
+
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py b/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py
new file mode 100644
index 0000000..a3c565b
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py
@@ -0,0 +1,139 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import gi
+import shlex
+import pytest
+import os
+import subprocess
+import tempfile
+
+from gi.repository import (
+    NsdYang,
+    NsrYang,
+    RwNsrYang,
+    RwVnfrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+@pytest.fixture(scope='session', autouse=True)
+def cloud_account_name(request):
+    '''fixture which returns the name used to identify the cloud account'''
+    return 'cloud-0'
+
+@pytest.fixture(scope='session')
+def launchpad_host(request, confd_host):
+    return confd_host
+
+@pytest.fixture(scope='session')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+@pytest.fixture(scope='session')
+def vnfr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VnfrYang)
+
+@pytest.fixture(scope='session')
+def rwvnfr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfrYang)
+
+@pytest.fixture(scope='session')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+@pytest.fixture(scope='session')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+@pytest.fixture(scope='session')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+@pytest.fixture(scope='session')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+@pytest.fixture(scope='session')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+# NOTE: a second, byte-identical 'base_proxy' fixture definition was removed
+# here; it silently shadowed the definition directly above.
+
+
+@pytest.fixture(scope='session')
+def mvv_descr_dir(request):
+    """root-directory of descriptors files used for Multi-VM VNF"""
+    return os.path.join(
+        os.environ["RIFT_INSTALL"],
+        "demos/tests/multivm_vnf"
+        )
+
+@pytest.fixture(scope='session')
+def package_dir(request):
+    return tempfile.mkdtemp(prefix="mvv_")
+
+@pytest.fixture(scope='session')
+def trafgen_vnfd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    pkg_cmd = "{pkg_scr} --descriptor-type='vnfd' --format='xml' --infile='{infile}' --outdir='{outdir}'".format(
+            pkg_scr=package_gen_script,
+            outdir=package_dir,
+            infile=os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_trafgen_vnfd.xml'))
+    pkg_file = os.path.join(package_dir, 'multivm_trafgen_vnfd.tar.gz')
+    command = shlex.split(pkg_cmd)
+    print("Running the command arguments: %s" % command)
+    command = [package_gen_script,
+               "--descriptor-type", "vnfd",
+               "--format", "xml",
+               "--infile", "%s" % os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_trafgen_vnfd.xml'),
+               "--outdir", "%s" % package_dir]
+    print("Running new command arguments: %s" % command)
+    subprocess.check_call(command)
+    return pkg_file
+
+@pytest.fixture(scope='session')
+def trafsink_vnfd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    pkg_cmd = "{pkg_scr} --descriptor-type='vnfd' --format='xml' --infile='{infile}' --outdir='{outdir}'".format(
+            pkg_scr=package_gen_script,
+            outdir=package_dir,
+            infile=os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_trafsink_vnfd.xml'))
+    pkg_file = os.path.join(package_dir, 'multivm_trafsink_vnfd.tar.gz')
+    command = shlex.split(pkg_cmd)
+    print("Running the command arguments: %s" % command)
+    command = [package_gen_script,
+               "--descriptor-type", "vnfd",
+               "--format", "xml",
+               "--infile", "%s" % os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_trafsink_vnfd.xml'),
+               "--outdir", "%s" % package_dir]
+    print("Running new command arguments: %s" % command)
+    subprocess.check_call(command)
+    return pkg_file
+
+@pytest.fixture(scope='session')
+def slb_vnfd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    pkg_cmd = "{pkg_scr} --outdir {outdir} --infile {infile} --descriptor-type vnfd --format xml".format(
+            pkg_scr=package_gen_script,
+            outdir=package_dir,
+            infile=os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_slb_vnfd.xml'),
+            )
+    pkg_file = os.path.join(package_dir, 'multivm_slb_vnfd.tar.gz')
+    subprocess.check_call(shlex.split(pkg_cmd))
+    return pkg_file
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py
new file mode 100755
index 0000000..557518b
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py
@@ -0,0 +1,286 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_multi_vm_vnf_slb.py
+@author Karun Ganesharatnam (karun.ganesharatnam@riftio.com)
+@date 03/16/2016
+@brief Scriptable load-balancer test with multi-vm VNFs
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import shutil
+import subprocess
+import time
+import uuid
+
+from gi.repository import (
+    NsdYang,
+    NsrYang,
+    RwNsrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+import rift.auto.mano
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.fixture(scope='module')
+def multi_vm_vnf_nsd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    """Module fixture: generate the multivm_tg_slb_ts_config NSD package.
+
+    Runs the package generator over the NSD XML descriptor and returns the
+    path of the resulting tarball inside package_dir.
+    """
+    pkg_cmd = "{pkg_scr} --outdir {outdir} --infile {infile} --descriptor-type nsd --format xml".format(
+            pkg_scr=package_gen_script,
+            outdir=package_dir,
+            infile=os.path.join(mvv_descr_dir, 'nsd/xml/multivm_tg_slb_ts_config_nsd.xml'),
+            )
+    pkg_file = os.path.join(package_dir, 'multivm_tg_slb_ts_config_nsd.tar.gz')
+    logger.debug("Generating NSD package: %s", pkg_file)
+    subprocess.check_call(shlex.split(pkg_cmd))
+    return pkg_file
+
+def create_nsr(nsd_id, input_param_list, cloud_account_name):
+    """
+    Create the NSR record object
+
+    Arguments:
+         nsd_id             -  NSD id
+         input_param_list - list of input-parameter objects
+         cloud_account_name - name of the cloud account the NS is deployed on
+
+    Return:
+         NSR object
+    """
+    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+
+    # Random id; the display name is derived from it so repeated runs do not
+    # collide on NSR names.
+    nsr.id = str(uuid.uuid4())
+    nsr.name = rift.auto.mano.resource_name(nsr.id)
+    nsr.short_name = "nsr_short_name"
+    nsr.description = "This is a description"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+    nsr.input_parameter.extend(input_param_list)
+    nsr.cloud_account = cloud_account_name
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    """Upload a descriptor package through the launchpad REST upload API.
+
+    Arguments:
+        logger          - logger used for progress messages
+        descriptor_file - path of the package tarball to upload
+        host            - launchpad host serving the upload API
+
+    Return:
+        transaction id string used to poll the onboard state
+    """
+    curl_cmd = 'curl --insecure -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    # The upload endpoint replies with a JSON body carrying the transaction id.
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    """Raised when a descriptor onboard transaction fails or times out."""
+    pass
+
+
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
+    def check_status_onboard_status():
+        uri = 'http://%s:4567/api/upload/%s/state' % (host, transaction_id)
+        curl_cmd = 'curl --insecure {uri}'.format(
+                uri=uri
+                )
+        return subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    elapsed = 0
+    start = time.time()
+    while elapsed < timeout:
+        reply = check_status_onboard_status()
+        state = json.loads(reply)
+        if state["status"] == "success":
+            break
+
+        if state["status"] != "pending":
+            raise DescriptorOnboardError(state)
+
+        time.sleep(1)
+        elapsed = time.time() - start
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+    logger.info("Descriptor onboard was successful")
+
+
+@pytest.mark.setup('multivmvnf')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultiVmVnfSlb(object):
+    pkg_dir = None
+    @classmethod
+    def teardown_class(cls):
+        """ remove the temporary directory contains the descriptor packages
+        """
+        logger.debug("Removing the temporary package directory: %s", cls.pkg_dir)
+#         if not cls.pkg_dir is None:
+#            shutil.rmtree(cls.pkg_dir)
+
+    def test_onboard_trafgen_vnfd(self, logger, launchpad_host, vnfd_proxy, trafgen_vnfd_package_file):
+        TestMultiVmVnfSlb.pkg_dir = os.path.dirname(trafgen_vnfd_package_file)
+        logger.info("Onboarding trafgen vnfd package: %s", trafgen_vnfd_package_file)
+        trans_id = upload_descriptor(logger, trafgen_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should only be a single vnfd"
+        vnfd = vnfds[0]
+        assert vnfd.name == "multivm_trafgen_vnfd"
+
+    def test_onboard_trafsink_vnfd(self, logger, launchpad_host, vnfd_proxy, trafsink_vnfd_package_file):
+        TestMultiVmVnfSlb.pkg_dir = os.path.dirname(trafsink_vnfd_package_file)
+        logger.info("Onboarding trafsink vnfd package: %s", trafsink_vnfd_package_file)
+        trans_id = upload_descriptor(logger, trafsink_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "multivm_trafsink_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_slb_vnfd(self, logger, launchpad_host, vnfd_proxy, slb_vnfd_package_file):
+        TestMultiVmVnfSlb.pkg_dir = os.path.dirname(slb_vnfd_package_file)
+        logger.info("Onboarding slb vnfd package: %s", slb_vnfd_package_file)
+        trans_id = upload_descriptor(logger, slb_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 3, "There should be two vnfds"
+        assert "multivm_slb_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_multi_vm_vnf_nsd(self, logger, launchpad_host, nsd_proxy, multi_vm_vnf_nsd_package_file):
+        logger.info("Onboarding tg_slb_ts nsd package: %s", multi_vm_vnf_nsd_package_file)
+        trans_id = upload_descriptor(logger, multi_vm_vnf_nsd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "multivm_tg_slb_ts_config_nsd"
+
+    def test_instantiate_multi_vm_vnf_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy, cloud_account_name):
+
+        def verify_input_parameters (running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            for run_input_param in running_config.input_parameter:
+                if (input_param.xpath == config_param.xpath and
+                    input_param.value == config_param.value):
+                    return True
+
+            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
+                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                           config_param.value,
+                                                                           running_nsr_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        input_parameters = []
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+        descr_value = "New NSD Description"
+        in_param_id = str(uuid.uuid4())
+
+        input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd.id, input_parameters, cloud_account_name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.id)
+        rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=360)
+
+
+@pytest.mark.teardown('multivmvnf')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultiVmVnfSlbTeardown(object):
+    """Teardown: terminate the NSR and delete onboarded NSD/VNFD records."""
+
+    def test_terminate_nsr(self, nsr_proxy, vnfr_proxy, rwnsr_proxy, logger):
+        """
+        Terminate the instance and check if the record is deleted.
+
+        Asserts:
+        1. NSR record is deleted from instance-config.
+
+        """
+        logger.debug("Terminating Multi VM VNF's NSR")
+
+        nsr_path = "/ns-instance-config"
+        nsr = rwnsr_proxy.get_config(nsr_path)
+
+        # Only one NSR is expected; delete the first configured entry.
+        ping_pong = nsr.nsr[0]
+        rwnsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(ping_pong.id))
+        # Fixed wait -- presumably gives the termination time to propagate
+        # before later record checks; confirm whether polling would suffice.
+        time.sleep(30)
+
+
+    def test_delete_records(self, nsd_proxy, vnfd_proxy):
+        """Delete the NSD & VNFD records
+
+        Asserts:
+            The records are deleted.
+        """
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            nsd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd_proxy.delete_config(xpath)
+
+        # Allow the deletes to settle before re-reading the catalogs.
+        time.sleep(5)
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
new file mode 100755
index 0000000..ca6e9b5
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
@@ -0,0 +1,282 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_multi_vm_vnf_trafgen.py
+@author Karun Ganesharatnam (karun.ganesharatnam@riftio.com)
+@date 03/16/2016
+@brief Scriptable load-balancer test with multi-vm VNFs
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import shutil
+import subprocess
+import time
+import uuid
+
+from gi.repository import (
+    NsdYang,
+    NsrYang,
+    RwNsrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+import rift.auto.mano
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.fixture(scope='module')
+def multi_vm_vnf_nsd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    pkg_cmd = "{pkg_scr} --descriptor-type='nsd' --format='xml' --infile='{infile}' --outdir='{outdir}'".format(
+            pkg_scr=package_gen_script,
+            infile=os.path.join(mvv_descr_dir, 'nsd/xml/multivm_tg_ts_config_nsd.xml'),
+            outdir=package_dir)
+    pkg_file = os.path.join(package_dir, 'multivm_tg_ts_config_nsd.tar.gz')
+    logger.debug("Generating NSD package: %s", pkg_file)
+    command = shlex.split(pkg_cmd)
+    print("Running the command arguments: %s" % command)
+    command = [package_gen_script,
+               "--descriptor-type", "nsd",
+               "--format", "xml",
+               "--infile", "%s" % os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_tg_ts_config_nsd.xml'),
+               "--outdir", "%s" % package_dir]
+    print("Running new command arguments: %s" % command)
+    subprocess.check_call(shlex.split(pkg_cmd))
+    return pkg_file
+
+def create_nsr(nsd_id, input_param_list, cloud_account_name):
+    """
+    Create the NSR record object
+
+    Arguments:
+         nsd_id             -  NSD id
+         input_param_list - list of input-parameter objects
+         cloud_account_name - name of the cloud account the NS is deployed on
+
+    Return:
+         NSR object
+    """
+    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+
+    # Random id; the display name is derived from it so repeated runs do not
+    # collide on NSR names.
+    nsr.id = str(uuid.uuid4())
+    nsr.name = rift.auto.mano.resource_name(nsr.id)
+    nsr.short_name = "nsr_short_name"
+    nsr.description = "This is a description"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+    nsr.input_parameter.extend(input_param_list)
+    nsr.cloud_account = cloud_account_name
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    """Upload a descriptor package through the launchpad REST upload API.
+
+    Arguments:
+        logger          - logger used for progress messages
+        descriptor_file - path of the package tarball to upload
+        host            - launchpad host serving the upload API
+
+    Return:
+        transaction id string used to poll the onboard state
+    """
+    # NOTE(review): the URL template ends with a space before the closing
+    # quote; shlex.split discards it, so it is harmless, but the twin helper
+    # in test_multi_vm_vnf_slb.py has no such space -- consider aligning.
+    curl_cmd = 'curl --insecure -F "descriptor=@{file}" http://{host}:4567/api/upload '.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    # The upload endpoint replies with a JSON body carrying the transaction id.
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    """Raised when a descriptor onboard transaction fails or times out."""
+    pass
+
+
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
+    def check_status_onboard_status():
+        uri = 'http://%s:4567/api/upload/%s/state' % (host, transaction_id)
+        curl_cmd = 'curl --insecure {uri}'.format(
+                uri=uri
+                )
+        return subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    elapsed = 0
+    start = time.time()
+    while elapsed < timeout:
+        reply = check_status_onboard_status()
+        state = json.loads(reply)
+        if state["status"] == "success":
+            break
+
+        if state["status"] != "pending":
+            raise DescriptorOnboardError(state)
+
+        time.sleep(1)
+        elapsed = time.time() - start
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+    logger.info("Descriptor onboard was successful")
+
+
+@pytest.mark.setup('multivmvnf')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultiVmVnfTrafgenApp(object):
+    pkg_dir = None
+    @classmethod
+    def teardown_class(cls):
+        """ remove the temporary directory contains the descriptor packages
+        """
+        logger.debug("Removing the temporary package directory: %s", cls.pkg_dir)
+        if not cls.pkg_dir is None:
+            shutil.rmtree(cls.pkg_dir)
+
+    def test_onboard_trafgen_vnfd(self, logger, launchpad_host, vnfd_proxy, trafgen_vnfd_package_file):
+        TestMultiVmVnfTrafgenApp.pkg_dir = os.path.dirname(trafgen_vnfd_package_file)
+        logger.info("Onboarding trafgen vnfd package: %s", trafgen_vnfd_package_file)
+        trans_id = upload_descriptor(logger, trafgen_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should only be a single vnfd"
+        vnfd = vnfds[0]
+        assert vnfd.name == "multivm_trafgen_vnfd"
+
+    def test_onboard_trafsink_vnfd(self, logger, launchpad_host, vnfd_proxy, trafsink_vnfd_package_file):
+        TestMultiVmVnfTrafgenApp.pkg_dir = os.path.dirname(trafsink_vnfd_package_file)
+        logger.info("Onboarding trafsink vnfd package: %s", trafsink_vnfd_package_file)
+        trans_id = upload_descriptor(logger, trafsink_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "multivm_trafsink_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_multi_vm_vnf_nsd(self, logger, launchpad_host, nsd_proxy, multi_vm_vnf_nsd_package_file):
+        logger.info("Onboarding tg_ts nsd package: %s", multi_vm_vnf_nsd_package_file)
+        trans_id = upload_descriptor(logger, multi_vm_vnf_nsd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "multivm_tg_ts_config_nsd"
+
+    def test_instantiate_multi_vm_vnf_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy, cloud_account_name):
+
+        def verify_input_parameters (running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            for run_input_param in running_config.input_parameter:
+                if (input_param.xpath == config_param.xpath and
+                    input_param.value == config_param.value):
+                    return True
+
+            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
+                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                           config_param.value,
+                                                                           running_nsr_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        input_parameters = []
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+        descr_value = "New NSD Description"
+        in_param_id = str(uuid.uuid4())
+
+        input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd.id, input_parameters, cloud_account_name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.id)
+        rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=360)
+
+
+@pytest.mark.teardown('multivmvnf')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultiVmVnfTrafgenAppTeardown(object):
+    """Teardown: terminate the NSR and delete onboarded NSD/VNFD records."""
+
+    def test_terminate_nsr(self, nsr_proxy, vnfr_proxy, rwnsr_proxy, logger):
+        """
+        Terminate the instance and check if the record is deleted.
+
+        Asserts:
+        1. NSR record is deleted from instance-config.
+
+        """
+        logger.debug("Terminating Multi VM VNF's NSR")
+
+        nsr_path = "/ns-instance-config"
+        nsr = rwnsr_proxy.get_config(nsr_path)
+
+        # Only one NSR is expected; delete the first configured entry.
+        ping_pong = nsr.nsr[0]
+        rwnsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(ping_pong.id))
+        # Fixed wait -- presumably gives the termination time to propagate
+        # before later record checks; confirm whether polling would suffice.
+        time.sleep(30)
+
+
+    def test_delete_records(self, nsd_proxy, vnfd_proxy):
+        """Delete the NSD & VNFD records
+
+        Asserts:
+            The records are deleted.
+        """
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            nsd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd_proxy.delete_config(xpath)
+
+        # Allow the deletes to settle before re-reading the catalogs.
+        time.sleep(5)
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py
new file mode 100644
index 0000000..197e95c
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py
@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_trafgen_data.py
+@author Karun Ganesharatnam (karun.ganesharatnam@riftio.com)
+@date 03/16/2016
+@brief Scriptable load-balancer test with multi-vm VNFs
+"""
+
+import ipaddress
+import pytest
+import re
+import subprocess
+import time
+
+import rift.auto.session
+
+from gi.repository import (
+    RwTrafgenYang,
+    RwTrafgenDataYang,
+    RwVnfBaseOpdataYang,
+    RwVnfBaseConfigYang,
+    RwTrafgenYang
+)
+
+
+@pytest.fixture(scope='session')
+def trafgen_vnfr(request, rwvnfr_proxy, session_type):
+    """Session fixture: return the VNFR whose short_name contains 'trafgen'.
+
+    Fails the session if no such VNFR exists in the catalog.
+    """
+    vnfr = "/vnfr-catalog/vnfr"
+    vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
+    # Note: the loop variable shadows the xpath string above; harmless here.
+    for vnfr in vnfrs.vnfr:
+        if 'trafgen' in vnfr.short_name:
+            return vnfr
+    assert False, "Not found the VNFR with name 'trafgen'"
+
+@pytest.fixture(scope='session')
+def trafgen_session(request, trafgen_vnfr, session_type):
+    trafgen_host = trafgen_vnfr.vnf_configuration.config_access.mgmt_ip_address
+    if session_type == 'netconf':
+        tg_session = rift.auto.session.NetconfSession(host=trafgen_host)
+    elif session_type == 'restconf':
+        tg_session = rift.auto.session.RestconfSession(host=trafgen_host)
+
+    tg_session.connect()
+    rift.vcs.vcs.wait_until_system_started(tg_session, 900)
+    return tg_session
+
+@pytest.fixture(scope='session')
+def trafgen_ports(request, trafgen_vnfr, session_type):
+    """Session fixture: names of all connection points on the trafgen VNFR."""
+    return [cp.name for cp in trafgen_vnfr.connection_point]
+
+@pytest.fixture(scope='module')
+def tgdata_proxy(trafgen_session):
+    '''Module fixture: proxy to RwTrafgenDataYang on the trafgen session'''
+    return trafgen_session.proxy(RwTrafgenDataYang)
+
+
+@pytest.fixture(scope='module')
+def tgcfg_proxy(trafgen_session):
+    '''Module fixture: proxy to RwTrafgenYang on the trafgen session'''
+    return trafgen_session.proxy(RwTrafgenYang)
+
+
+@pytest.fixture(scope='module')
+def vnfdata_proxy(trafgen_session):
+    '''Module fixture: proxy to RwVnfBaseOpdataYang on the trafgen session'''
+    return trafgen_session.proxy(RwVnfBaseOpdataYang)
+
+
+@pytest.fixture(scope='module')
+def vnfcfg_proxy(trafgen_session):
+    '''Module fixture: proxy to RwVnfBaseConfigYang on the trafgen session'''
+    return trafgen_session.proxy(RwVnfBaseConfigYang)
+
+
+def confirm_config(tgcfg_proxy, vnf_name):
+    '''To ensure the configuration is present for the given VNF
+
+    Polls the vnf-config for instance 0 of vnf_name, up to 24 attempts at
+    10 second intervals (240 s total), and fails the test on timeout.
+
+    Arguments:
+        tgcfg_proxy - proxy to the trafgen configuration model
+        vnf_name - vnf name of configuration
+    '''
+    xpath = "/vnf-config/vnf[name='%s'][instance='0']" % vnf_name
+    for _ in range(24):
+        tg_config = tgcfg_proxy.get_config(xpath)
+        if tg_config is not None:
+            break
+        time.sleep(10)
+    else:
+        # for/else: reached only if the loop never hit 'break'.
+        assert False, "Configuration check timeout"
+
+
+def start_traffic(tgdata_proxy, tgcfg_proxy, port_name):
+    '''Start traffic on the port with the specified name.
+
+    Waits for the trafgen configuration to be present, then issues the
+    start-trafgen-traffic RPC for vnf 'trafgen' instance 0.
+
+    Arguments:
+        tgdata_proxy - proxy used to issue the RPC
+        tgcfg_proxy - proxy used to confirm the configuration exists
+        port_name - name of port on which to start traffic
+    '''
+    confirm_config(tgcfg_proxy, 'trafgen')
+    rpc_input = RwTrafgenDataYang.RwStartTrafgenTraffic.from_dict({
+        'vnf_name':'trafgen',
+        'vnf_instance':0,
+        'port_name':port_name
+    })
+    rpc_output = RwVnfBaseOpdataYang.YangOutput_RwVnfBaseOpdata_Start_VnfOutput()
+    tgdata_proxy.rpc(rpc_input, rpc_name='start', output_obj=rpc_output)
+
+
+def stop_traffic(tgdata_proxy, port_name):
+    '''Stop traffic on the port with the specified name.
+
+    Issues the stop-trafgen-traffic RPC for vnf 'trafgen' instance 0.
+
+    Arguments:
+        tgdata_proxy - proxy used to issue the RPC
+        port_name - name of port on which to stop traffic
+    '''
+    rpc_input = RwTrafgenDataYang.RwStopTrafgenTraffic.from_dict({
+        'vnf_name':'trafgen',
+        'vnf_instance':0,
+        'port_name':port_name
+    })
+    rpc_output = RwVnfBaseOpdataYang.YangOutput_RwVnfBaseOpdata_Stop_VnfOutput()
+    tgdata_proxy.rpc(rpc_input, rpc_name='stop', output_obj=rpc_output)
+
+
+def wait_for_traffic_started(vnfdata_proxy, vnf_name, port_name, timeout=120, interval=2, threshold=60):
+    '''Wait for traffic to be started on the specified port
+
+    Traffic is determined to be started if the input packets on the port
+    increment by more than threshold during the sampling interval.
+
+    Arguments:
+        vnfdata_proxy - proxy to the vnf operational data
+        vnf_name - name of the vnf owning the port
+        port_name - name of the port being monitored
+        timeout - time allowed for traffic to start
+        interval - interval at which the counters should be checked
+        threshold - values under the threshold treated as 0
+    '''
+    def value_incremented(previous_sample, current_sample):
+        '''Comparison that returns True if the the sampled counter increased
+        beyond the specified threshold during the sampling interval
+        otherwise returns false
+        '''
+        return (int(current_sample) - int(previous_sample)) > threshold
+
+    # Only input-packets are sampled, despite the docstring's
+    # "input/output" wording.
+    xpath = "/vnf-opdata/vnf[name='{}'][instance='0']/port-state[portname='{}']/counters/{}"
+    vnfdata_proxy.wait_for_interval(xpath.format(vnf_name, port_name, 'input-packets'),
+                                    value_incremented, timeout=timeout, interval=interval)
+
+
+def wait_for_traffic_stopped(vnfdata_proxy, vnf_name, port_name, timeout=60, interval=2, threshold=60):
+    '''Wait for traffic to be stopped on the specified port
+
+    Traffic is determined to be stopped if the input packets on the port
+    increase by less than threshold during the sampling interval.
+
+    Arguments:
+        vnfdata_proxy - proxy to the vnf operational data
+        vnf_name - name of the vnf owning the port
+        port_name - name of the port being monitored
+        timeout - time allowed for traffic to stop
+        interval - interval at which the counters should be checked
+        threshold - values under the threshold treated as 0
+    '''
+    def value_unchanged(previous_sample, current_sample):
+        '''Comparison that returns True if the the sampled counter increased
+        less than the specified threshold during the sampling interval
+        otherwise returns False
+        '''
+        return (int(current_sample) - int(previous_sample)) < threshold
+
+    # Only input-packets are sampled, despite the docstring's
+    # "input/output" wording.
+    xpath = "/vnf-opdata/vnf[name='{}'][instance='0']/port-state[portname='{}']/counters/{}"
+    vnfdata_proxy.wait_for_interval(xpath.format(vnf_name, port_name, 'input-packets'), value_unchanged, timeout=timeout, interval=interval)
+
+@pytest.mark.depends('multivmvnf')
+@pytest.mark.incremental
+class TestMVVSlbDataFlow:
+    """Data-path tests: start/stop traffic on every trafgen port."""
+
+    def test_start_stop_traffic(self, vnfdata_proxy, tgdata_proxy, tgcfg_proxy, trafgen_ports):
+        ''' This test verifies that traffic can be stopped and started on
+        all trafgen ports.
+
+        Arguments:
+            vnfdata_proxy - proxy to retrieve vnf operational data
+            tgdata_proxy - proxy to retrieve trafgen operational data
+            tgcfg_proxy - proxy to retrieve trafgen configuration
+            trafgen_ports - list of port names on which traffic can be started
+        '''
+        # Fixed 5-minute wait -- presumably lets the VNF data path settle
+        # after instantiation before driving traffic; confirm whether a
+        # readiness poll could replace it.
+        time.sleep(300)
+        for port in trafgen_ports:
+            start_traffic(tgdata_proxy, tgcfg_proxy, port)
+            wait_for_traffic_started(vnfdata_proxy, 'trafgen', port)
+            stop_traffic(tgdata_proxy, port)
+            wait_for_traffic_stopped(vnfdata_proxy, 'trafgen',  port)
+
+
+    def test_start_traffic(self, vnfdata_proxy, tgdata_proxy, tgcfg_proxy, trafgen_ports):
+        ''' This test starts traffic on all trafgen ports in preparation for
+        subsequent tests
+
+        Arguments:
+            vnfdata_proxy - proxy to retrieve vnf operational data
+            tgdata_proxy - proxy to retrieve trafgen operational data
+            tgcfg_proxy - proxy to retrieve trafgen configuration
+            trafgen_ports - list of port names on which traffic can be started
+        '''
+        for port in trafgen_ports:
+            start_traffic(tgdata_proxy, tgcfg_proxy, port)
+            wait_for_traffic_started(vnfdata_proxy, 'trafgen', port)
diff --git a/rwlaunchpad/ra/pytest/ns/conftest.py b/rwlaunchpad/ra/pytest/ns/conftest.py
new file mode 100644
index 0000000..a1fa446
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/conftest.py
@@ -0,0 +1,292 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import functools
+import hashlib
+import pytest
+import os
+import tempfile
+import shutil
+import subprocess
+
+import gi
+import rift.auto.session
+import rift.mano.examples.ping_pong_nsd as ping_pong
+import rift.vcs.vcs
+
class PackageError(Exception):
    '''Raised when an expected descriptor package cannot be located on disk.'''
    pass
+
@pytest.fixture(scope='session', autouse=True)
def cloud_account_name(request):
    '''Session fixture: the name used to identify the cloud account.'''
    account_name = 'cloud-0'
    return account_name
+
@pytest.fixture(scope='session')
def ping_pong_install_dir():
    '''Fixture containing the location of ping_pong installation
    (the images directory under $RIFT_ROOT).
    '''
    return os.path.join(os.environ["RIFT_ROOT"], "images")
+
@pytest.fixture(scope='session')
def ping_vnfd_package_file(ping_pong_install_dir):
    '''Fixture containing the location of the ping vnfd package

    Arguments:
        ping_pong_install_dir - location of ping_pong installation

    Raises:
        PackageError - if the package file does not exist on disk
    '''
    ping_pkg_file = os.path.join(
            ping_pong_install_dir,
            "ping_vnfd_with_image.tar.gz",
            )
    if not os.path.exists(ping_pkg_file):
        # The original called raise_package_error(), which is not defined
        # anywhere in this module and would itself fail with a NameError.
        raise PackageError("Package file not found: {}".format(ping_pkg_file))

    return ping_pkg_file
+
+
@pytest.fixture(scope='session')
def pong_vnfd_package_file(ping_pong_install_dir):
    '''Fixture containing the location of the pong vnfd package

    Arguments:
        ping_pong_install_dir - location of ping_pong installation

    Raises:
        PackageError - if the package file does not exist on disk
    '''
    pong_pkg_file = os.path.join(
            ping_pong_install_dir,
            "pong_vnfd_with_image.tar.gz",
            )
    if not os.path.exists(pong_pkg_file):
        # The original called raise_package_error(), which is not defined
        # anywhere in this module and would itself fail with a NameError.
        raise PackageError("Package file not found: {}".format(pong_pkg_file))

    return pong_pkg_file
+
+
@pytest.fixture(scope='session')
def ping_pong_nsd_package_file(ping_pong_install_dir):
    '''Fixture containing the location of the ping_pong_nsd package

    Arguments:
        ping_pong_install_dir - location of ping_pong installation

    Raises:
        PackageError - if the package file does not exist on disk
    '''
    ping_pong_pkg_file = os.path.join(
            ping_pong_install_dir,
            "ping_pong_nsd.tar.gz",
            )
    if not os.path.exists(ping_pong_pkg_file):
        # The original called raise_package_error(), which is not defined
        # anywhere in this module and would itself fail with a NameError.
        raise PackageError("Package file not found: {}".format(ping_pong_pkg_file))

    return ping_pong_pkg_file
+
@pytest.fixture(scope='session')
def image_dirs():
    ''' Fixture containing a list of directories where images can be found:
    the mano build tree first, then the cached copies under $RIFT_ROOT.
    '''
    return [
        os.path.join(
            os.environ['RIFT_BUILD'],
            "modules/core/mano/src/core_mano-build/examples/",
            "ping_pong_ns/ping_vnfd_with_image/images"
        ),
        os.path.join(
            os.environ['RIFT_ROOT'],
            "images"
        ),
    ]
+
@pytest.fixture(scope='session')
def image_paths(image_dirs):
    ''' Fixture containing a mapping of image names to their full paths.
    Later directories in image_dirs override earlier ones for duplicate
    image names.

    Arguments:
        image_dirs - a list of directories where images are located
    '''
    mapping = {}
    for directory in image_dirs:
        if not os.path.exists(directory):
            continue
        for entry in os.listdir(directory):
            mapping[entry] = os.path.join(directory, entry)
    return mapping
+
@pytest.fixture(scope='session')
def path_ping_image(image_paths):
    ''' Fixture containing the location of the ping image

    Arguments:
        image_paths - mapping of images to their paths

    Raises:
        PackageError - if the ping image was not found in any image dir
            (previously this surfaced as a bare KeyError with no context)
    '''
    image_name = "Fedora-x86_64-20-20131211.1-sda-ping.qcow2"
    if image_name not in image_paths:
        raise PackageError("Image not found in image dirs: {}".format(image_name))
    return image_paths[image_name]
+
@pytest.fixture(scope='session')
def path_pong_image(image_paths):
    ''' Fixture containing the location of the pong image

    Arguments:
        image_paths - mapping of images to their paths

    Raises:
        PackageError - if the pong image was not found in any image dir
            (previously this surfaced as a bare KeyError with no context)
    '''
    image_name = "Fedora-x86_64-20-20131211.1-sda-pong.qcow2"
    if image_name not in image_paths:
        raise PackageError("Image not found in image dirs: {}".format(image_name))
    return image_paths[image_name]
+
class PingPongFactory:
    '''Builds fresh ping/pong VNF and NS descriptors, embedding the md5
    checksums of the configured images and (optionally) rsyslog forwarding
    userdata.'''

    def __init__(self, path_ping_image, path_pong_image, rsyslog_host, rsyslog_port):
        # Image paths whose md5 digests are embedded in the descriptors.
        self.path_ping_image = path_ping_image
        self.path_pong_image = path_pong_image
        # Optional rsyslog forwarding target; userdata is only generated
        # when both host and port are set.
        self.rsyslog_host = rsyslog_host
        self.rsyslog_port = rsyslog_port

    def generate_descriptors(self):
        '''Return a new set of ping and pong descriptors
        '''
        def md5sum(path):
            # Stream the file through md5 in 4 KiB chunks to avoid
            # loading a whole image into memory.
            digest = hashlib.md5()
            with open(path, mode='rb') as image_file:
                while True:
                    chunk = image_file.read(4096)
                    if not chunk:
                        break
                    digest.update(chunk)
            return digest.hexdigest()

        ping_md5sum = md5sum(self.path_ping_image)
        pong_md5sum = md5sum(self.path_pong_image)

        ex_userdata = None
        if self.rsyslog_host and self.rsyslog_port:
            ex_userdata = '''
rsyslog:
  - "$ActionForwardDefaultTemplate RSYSLOG_ForwardFormat"
  - "*.* @{host}:{port}"
            '''.format(
                host=self.rsyslog_host,
                port=self.rsyslog_port,
            )

        return ping_pong.generate_ping_pong_descriptors(
                pingcount=1,
                ping_md5sum=ping_md5sum,
                pong_md5sum=pong_md5sum,
                ex_ping_userdata=ex_userdata,
                ex_pong_userdata=ex_userdata,
        )
+
@pytest.fixture(scope='session')
def ping_pong_factory(path_ping_image, path_pong_image, rsyslog_host, rsyslog_port):
    '''Fixture returning a factory capable of generating ping and pong
    descriptors for this session.
    '''
    factory = PingPongFactory(path_ping_image, path_pong_image, rsyslog_host, rsyslog_port)
    return factory
+
@pytest.fixture(scope='session')
def ping_pong_records(ping_pong_factory):
    '''Fixture returning the default set of ping_pong descriptors generated
    once per session.
    '''
    records = ping_pong_factory.generate_descriptors()
    return records
+
+
@pytest.fixture(scope='session')
def descriptors(request, ping_pong_records):
    '''Fixture returning descriptor package paths for the network service
    selected via the --network-service option ("pingpong", "pingpong_noimg"
    or "haproxy"); returns None for any other value.
    '''
    def pingpong_descriptors(with_images=True):
        """Generated the VNFDs & NSD files for pingpong NS.

        Arguments:
            with_images - when True, copy the qcow2 images into each VNFD's
                images/ directory before packaging.

        Returns:
            Tuple: file path for ping vnfd, pong vnfd and ping_pong_nsd
        """
        ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records

        # Stage everything in a throw-away directory; packages are built here.
        tmpdir = tempfile.mkdtemp()
        rift_build = os.environ['RIFT_BUILD']
        MANO_DIR = os.path.join(
                rift_build,
                "modules/core/mano/src/core_mano-build/examples/ping_pong_ns")
        ping_img = os.path.join(MANO_DIR, "ping_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2")
        pong_img = os.path.join(MANO_DIR, "pong_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2")

        """ grab cached copies of these files if not found. They may not exist
            because our git submodule dependency mgmt
            will not populate these because they live in .build, not .install
        """
        if not os.path.exists(ping_img):
            ping_img = os.path.join(
                        os.environ['RIFT_ROOT'],
                        'images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2')
            pong_img = os.path.join(
                        os.environ['RIFT_ROOT'],
                        'images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2')

        # Serialize each descriptor to XML in the staging directory.
        for descriptor in [ping_vnfd, pong_vnfd, ping_pong_nsd]:
            descriptor.write_to_file(output_format='xml', outdir=tmpdir)

        ping_img_path = os.path.join(tmpdir, "{}/images/".format(ping_vnfd.name))
        pong_img_path = os.path.join(tmpdir, "{}/images/".format(pong_vnfd.name))

        if with_images:
            os.makedirs(ping_img_path)
            os.makedirs(pong_img_path)
            shutil.copy(ping_img, ping_img_path)
            shutil.copy(pong_img, pong_img_path)

        # Package each staged descriptor directory into a .tar.gz.
        # NOTE(review): the return code of generate_descriptor_pkg.sh is not
        # checked; a packaging failure only surfaces later as a missing
        # tarball — confirm whether that is acceptable.
        for dir_name in [ping_vnfd.name, pong_vnfd.name, ping_pong_nsd.name]:
            subprocess.call([
                    "sh",
                    "{rift_install}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh".format(rift_install=os.environ['RIFT_INSTALL']),
                    tmpdir,
                    dir_name])

        return (os.path.join(tmpdir, "{}.tar.gz".format(ping_vnfd.name)),
                os.path.join(tmpdir, "{}.tar.gz".format(pong_vnfd.name)),
                os.path.join(tmpdir, "{}.tar.gz".format(ping_pong_nsd.name)))

    def haproxy_descriptors():
        """HAProxy descriptors (pre-built packages in the ext_vnfs build tree)."""
        files = [
            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/http_client/http_client_vnfd.tar.gz"),
            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/httpd/httpd_vnfd.tar.gz"),
            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/haproxy/haproxy_vnfd.tar.gz"),
            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/waf/waf_vnfd.tar.gz"),
            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/haproxy_waf_httpd_nsd/haproxy_waf_httpd_nsd.tar.gz")
            ]

        return files

    if request.config.option.network_service == "pingpong":
        return pingpong_descriptors()
    elif request.config.option.network_service == "pingpong_noimg":
        return pingpong_descriptors(with_images=False)
    elif request.config.option.network_service == "haproxy":
        return haproxy_descriptors()
+
+
@pytest.fixture(scope='session')
def descriptor_images(request):
    '''Fixture returning image paths needed by the selected network service.
    Only the haproxy service requires extra images; all others get [].
    '''
    def haproxy_images():
        """HAProxy images."""
        rift_root = os.getenv('RIFT_ROOT')
        return [
            os.path.join(rift_root, "images/haproxy-v03.qcow2"),
            os.path.join(rift_root, "images/web-app-firewall-v02.qcow2"),
            os.path.join(rift_root, "images/web-server-v02.qcow2"),
        ]

    if request.config.option.network_service == "haproxy":
        return haproxy_images()

    return []
diff --git a/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py b/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py
new file mode 100644
index 0000000..846ef2e
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py
@@ -0,0 +1,170 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import pytest
+
+from gi.repository import NsrYang, RwNsrYang, RwVnfrYang, NsdYang, RwNsdYang
+import rift.auto.session
+
@pytest.fixture(scope='module')
def proxy(request, mgmt_session):
    # Shorthand for the management session's proxy factory; callers invoke it
    # with a Yang module, e.g. proxy(RwNsrYang).
    return mgmt_session.proxy
+
+
# Convenience aliases for the generated Yang classes used to build
# scaling-group configuration that gets merged into an NSR.
ScalingGroupInstance = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance
ScalingGroup = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup

# NOTE(review): INSTANCE_ID is not referenced anywhere in this module —
# confirm it is unused before removing.
INSTANCE_ID = 1
+
+
@pytest.mark.depends('nsr')
@pytest.mark.incremental
class TestScaling:
    '''Exercises manual scale-out/scale-in of NS scaling groups by editing
    /ns-instance-config and verifying the result in /ns-instance-opdata.'''

    def wait_for_nsr_state(self, proxy, state):
        """Wait till the NSR reaches a desired state.

        Args:
            proxy (Callable): Proxy for launchpad session.
            state (str): Expected state
        """
        # These tests assume a single NSR; watch that record's
        # operational-status leaf until it matches the expected state.
        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
        nsr = nsr_opdata.nsr[0]
        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
        proxy(RwNsrYang).wait_for(xpath, state, timeout=240)

    def verify_scaling_group(self, proxy, group_name, expected_records_count, scale_out=True):
        """
        Args:
            proxy (Callable): LP session
            group_name (str): Group name which is being scaled up.
            expected_records_count (int): Number of instance records expected
                in the scaling-group-record.
            scale_out (bool, optional): To identify scale-out/scale-in mode.
                NOTE(review): currently unused by the body — confirm intent
                before removing.

        Asserts:
            1. Additional records are added to the opdata
            2. Status of the scaling group
            3. New vnfr record has been created.
        """
        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
        nsr_id = nsr_opdata.nsr[0].ns_instance_config_ref

        xpath = ('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'
                 '/scaling-group-record[scaling-group-name-ref="{}"]').format(
                        nsr_id, group_name)

        scaling_record = proxy(NsrYang).get(xpath)

        assert len(scaling_record.instance) == expected_records_count

        for instance in scaling_record.instance:
            assert instance.op_status == 'running'

            # Every VNFR referenced by a scaling instance must exist in the
            # VNFR catalog.
            for vnfr in instance.vnfrs:
                vnfr_record = proxy(RwVnfrYang).get(
                        "/vnfr-catalog/vnfr[id='{}']".format(vnfr))
                assert vnfr_record is not None

    def verify_scale_up(self, proxy, group_name, expected):
        """Verifies the scale-out steps for the group.
        NSR moves from running -> scaling-out -> running

        Args:
            proxy (callable): LP proxy
            group_name (str): Name of the group to verify.
            expected (int): Expected instance record count after scale-out.
        """
        self.wait_for_nsr_state(proxy, "scaling-out")
        self.wait_for_nsr_state(proxy, "running")
        self.verify_scaling_group(proxy, group_name, expected)

    def verify_scale_in(self, proxy, group_name, expected):
        """Verifies the scale-in steps for the group.
        NSR moves from running -> scaling-in -> running

        Args:
            proxy (callable): LP proxy
            group_name (str): group name.
            expected (int): Expected instance record count after scale-in.
        """
        self.wait_for_nsr_state(proxy, "scaling-in")
        self.wait_for_nsr_state(proxy, "running")
        self.verify_scaling_group(proxy, group_name, expected, scale_out=False)

    def test_wait_for_nsr_configured(self, proxy):
        """Wait till the NSR state moves to configured before starting scaling
        tests.
        """
        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
        nsrs = nsr_opdata.nsr

        assert len(nsrs) == 1
        current_nsr = nsrs[0]

        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(current_nsr.ns_instance_config_ref)
        proxy(RwNsrYang).wait_for(xpath, "configured", timeout=240)


    def test_min_max_scaling(self, proxy):
        """For each scaling group: scale out one instance at a time up to one
        past max-instance-count (the final merge is expected to be rejected),
        then scale back in one instance at a time.
        """
        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
        nsrs = nsr_opdata.nsr
        nsd_id = nsrs[0].nsd_ref
        nsr_id = nsrs[0].ns_instance_config_ref

        # group_name = "http_client_group"

        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/scaling-group-record".format(nsr_id)
        scaling_records = proxy(RwNsrYang).get(xpath, list_obj=True)

        for scaling_record in scaling_records.scaling_group_record:
            group_name = scaling_record.scaling_group_name_ref
            xpath = "/nsd-catalog/nsd[id='{}']/scaling-group-descriptor[name='{}']".format(
                    nsd_id, group_name)
            scaling_group_desc = proxy(NsdYang).get(xpath)

            # Add + 1 to go beyond the threshold
            for instance_id in range(1, scaling_group_desc.max_instance_count + 1):
                xpath = '/ns-instance-config/nsr[id="{}"]/scaling-group[scaling-group-name-ref="{}"]'.format(
                            nsr_id,
                            group_name)

                instance = ScalingGroupInstance.from_dict({"id": instance_id})
                scaling_group = proxy(NsrYang).get(xpath)

                # First scale-out: the scaling-group entry does not exist in
                # config yet, so create it before appending the instance.
                if scaling_group is None:
                    scaling_group = ScalingGroup.from_dict({
                        'scaling_group_name_ref': group_name,
                        })

                scaling_group.instance.append(instance)

                try:
                    proxy(NsrYang).merge_config(xpath, scaling_group)
                    # The record count includes the original instance, hence
                    # instance_id + 1 after the instance_id-th scale-out.
                    self.verify_scale_up(proxy, group_name, instance_id + 1)
                except rift.auto.session.ProxyRequestError:
                    # The merge may only be rejected once we have gone past
                    # max-instance-count.
                    assert instance_id == scaling_group_desc.max_instance_count

            for instance_id in range(1, scaling_group_desc.max_instance_count):
                xpath = ('/ns-instance-config/nsr[id="{}"]/scaling-group'
                         '[scaling-group-name-ref="{}"]/'
                         'instance[id="{}"]').format(
                         nsr_id, group_name, instance_id)
                proxy(NsrYang).delete_config(xpath)
                self.verify_scale_in(proxy, group_name, instance_id)
+
+
+
+
+
+
+
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py
new file mode 100644
index 0000000..45a7832
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py
@@ -0,0 +1,677 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 11/03/2015
+@brief Launchpad System Test
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import shutil
+import subprocess
+import tempfile
+import time
+import uuid
+
+import rift.auto.mano
+import rift.auto.session
+import rift.mano.examples.ping_pong_nsd as ping_pong
+
+import gi
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+
+from gi.repository import (
+    NsdYang,
+    RwNsrYang,
+    RwVnfrYang,
+    NsrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+logging.basicConfig(level=logging.DEBUG)
+
@pytest.fixture(scope='module')
def vnfd_proxy(request, mgmt_session):
    '''Proxy for the RwVnfdYang model (VNFD catalog operations).'''
    return mgmt_session.proxy(RwVnfdYang)
+
@pytest.fixture(scope='module')
def rwvnfr_proxy(request, mgmt_session):
    '''Proxy for the RwVnfrYang model (VNFR catalog operations).'''
    return mgmt_session.proxy(RwVnfrYang)
+
@pytest.fixture(scope='module')
def vld_proxy(request, mgmt_session):
    '''Proxy for the VldYang model.'''
    return mgmt_session.proxy(VldYang)
+
@pytest.fixture(scope='module')
def nsd_proxy(request, mgmt_session):
    '''Proxy for the NsdYang model (NSD catalog operations).'''
    return mgmt_session.proxy(NsdYang)
+
@pytest.fixture(scope='module')
def rwnsr_proxy(request, mgmt_session):
    '''Proxy for the RwNsrYang model (NS instance config/opdata).'''
    return mgmt_session.proxy(RwNsrYang)
+
@pytest.fixture(scope='module')
def base_proxy(request, mgmt_session):
    '''Proxy for the RwBaseYang model.'''
    return mgmt_session.proxy(RwBaseYang)
+
class DescriptorOnboardError(Exception):
    '''Raised when a descriptor upload transaction fails, reports an unknown
    status, or does not complete before the timeout.'''
    pass
+
def create_nsr(nsd, input_param_list, cloud_account_name):
    """
    Create the NSR record object

    Arguments:
        nsd                 - NSD
        input_param_list    - list of input-parameter objects
        cloud_account_name  - name of cloud account

    Return:
         NSR object
    """
    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()

    nsr.id = str(uuid.uuid4())
    nsr.name = rift.auto.mano.resource_name(nsr.id)
    nsr.short_name = "nsr_short_name"
    nsr.description = "This is a description"
    # Populate the NSR's nsd field from the NSD being instantiated.  The
    # original code passed nsr.as_dict() here, copying the half-built NSR
    # onto its own nsd field instead of referencing the descriptor.
    nsr.nsd.from_dict(nsd.as_dict())
    nsr.admin_status = "ENABLED"
    nsr.input_parameter.extend(input_param_list)
    nsr.cloud_account = cloud_account_name

    return nsr
+
def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
    '''Upload a descriptor package through the launchpad's REST upload API.

    Arguments:
        logger          - logger instance
        descriptor_file - path of the package to upload
        host            - launchpad host (defaults to localhost)

    Returns:
        The transaction id that can be polled for onboarding status.
    '''
    curl_cmd = 'curl --insecure -F "descriptor=@{file}" https://{host}:4567/api/upload'.format(
            file=descriptor_file,
            host=host,
    )

    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)

    reply = json.loads(stdout)
    return reply["transaction_id"]
+
def wait_onboard_transaction_finished(logger, transaction_id, timeout=30, host="127.0.0.1"):
    """Poll the upload state endpoint until the onboard transaction completes.

    Arguments:
        logger         - logger instance
        transaction_id - id returned by upload_descriptor()
        timeout        - maximum seconds to wait for completion
        host           - launchpad host to query

    Raises:
        DescriptorOnboardError - if the transaction reports failure, an
            unknown status, or is still pending when the timeout expires.
    """

    def check_status_onboard_status():
        uri = 'https://%s:4567/api/upload/%s/state' % (host, transaction_id)
        curl_cmd = 'curl --insecure {uri}'.format(uri=uri)
        return subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)

    logger.info("Waiting for onboard transaction [%s] to complete", transaction_id)

    start = time.time()
    # Always poll at least once: the original loop never ran when
    # timeout <= 0, leaving 'state' unbound and raising a NameError instead
    # of a meaningful DescriptorOnboardError.
    while True:
        reply = check_status_onboard_status()
        state = json.loads(reply)

        if state["status"] == "success":
            break

        if state["status"] == "failure":
            raise DescriptorOnboardError(state["errors"])

        if state["status"] != "pending":
            raise DescriptorOnboardError(state)

        if time.time() - start > timeout:
            # Still pending at the deadline.
            raise DescriptorOnboardError(state)

        time.sleep(1)

    logger.info("Descriptor onboard was successful")
+
def onboard_descriptor(host, file_name, logger, endpoint, scheme, cert):
    """On-board/update the descriptor.

    Args:
        host (str): Launchpad IP
        file_name (str): Full file path.
        logger: Logger instance
        endpoint (str): endpoint to be used for the upload operation.

    NOTE(review): endpoint, scheme and cert are accepted but never used by
    this implementation — the upload always goes through upload_descriptor()'s
    fixed curl command. Confirm before relying on them.
    """
    logger.info("Onboarding package: %s", file_name)
    # Kick off the upload, then block until the transaction resolves.
    trans_id = upload_descriptor(
            logger,
            file_name,
            host=host)
    wait_onboard_transaction_finished(
        logger,
        trans_id,
        host=host)
+
+
def terminate_nsrs(rwvnfr_proxy, rwnsr_proxy, logger):
    """
    Terminate the instance and check if the record is deleted.

    Arguments:
        rwvnfr_proxy - proxy used to inspect the VNFR catalog
        rwnsr_proxy  - proxy used to read/delete NS instance configuration
        logger       - logger instance

    Asserts:
    1. NSR record is deleted from instance-config.
    2. No VNFR records remain in the VNFR catalog.

    """
    logger.debug("Terminating Ping Pong NSRs")

    nsr_path = "/ns-instance-config"
    nsr = rwnsr_proxy.get_config(nsr_path)
    nsrs = nsr.nsr

    # Delete every configured NSR, remembering each xpath so the deletion
    # can be verified afterwards.
    xpaths = []
    for ping_pong in nsrs:
        xpath = "/ns-instance-config/nsr[id='{}']".format(ping_pong.id)
        rwnsr_proxy.delete_config(xpath)
        xpaths.append(xpath)

    # NOTE(review): fixed 60s grace period for teardown to propagate —
    # confirm this is sufficient for larger deployments.
    time.sleep(60)
    for xpath in xpaths:
        nsr = rwnsr_proxy.get_config(xpath)
        assert nsr is None

    # Get the ns-instance-config
    ns_instance_config = rwnsr_proxy.get_config("/ns-instance-config")

    # Termination tests
    vnfr = "/vnfr-catalog/vnfr"
    vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
    assert vnfrs is None or len(vnfrs.vnfr) == 0

    # nsr = "/ns-instance-opdata/nsr"
    # nsrs = rwnsr_proxy.get(nsr, list_obj=True)
    # assert len(nsrs.nsr) == 0
+
+
def generate_tar_files(tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd):
    """Converts the descriptor to files and package them into zip files
    that can be uploaded to LP instance.

    Args:
        tmpdir (tempfile.TemporaryDirectory): staging directory for the
            packages (the code uses tmpdir.name throughout)
        ping_vnfd (VirtualNetworkFunction): Ping VNFD data
        pong_vnfd (VirtualNetworkFunction): Pong VNFD data
        ping_pong_nsd (NetworkService): PingPong NSD data

    Returns:
        Tuple: file path for ping vnfd, pong vnfd and ping_pong_nsd
    """
    rift_build = os.environ['RIFT_BUILD']
    MANO_DIR = os.path.join(
            rift_build,
            "modules/core/mano/src/core_mano-build/examples/ping_pong_ns")
    ping_img = os.path.join(MANO_DIR, "ping_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2")
    pong_img = os.path.join(MANO_DIR, "pong_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2")

    """ grab cached copies of these files if not found. They may not exist
        because our git submodule dependency mgmt
        will not populate these because they live in .build, not .install
    """
    if not os.path.exists(ping_img):
        ping_img = os.path.join(
                    os.environ['RIFT_ROOT'],
                    'images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2')
        pong_img = os.path.join(
                    os.environ['RIFT_ROOT'],
                    'images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2')

    # Serialize each descriptor to XML in the staging directory.
    for descriptor in [ping_vnfd, pong_vnfd, ping_pong_nsd]:
        descriptor.write_to_file(output_format='xml', outdir=tmpdir.name)

    ping_img_path = os.path.join(tmpdir.name, "{}/images/".format(ping_vnfd.name))
    pong_img_path = os.path.join(tmpdir.name, "{}/images/".format(pong_vnfd.name))
    os.makedirs(ping_img_path)
    os.makedirs(pong_img_path)

    shutil.copy(ping_img, ping_img_path)
    shutil.copy(pong_img, pong_img_path)

    # Package each staged descriptor directory into a .tar.gz.
    # NOTE(review): the return code of generate_descriptor_pkg.sh is not
    # checked; a packaging failure only surfaces later as a missing tarball.
    for dir_name in [ping_vnfd.name, pong_vnfd.name, ping_pong_nsd.name]:
        subprocess.call([
                "sh",
                "{rift_install}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh".format(rift_install=os.environ['RIFT_INSTALL']),
                tmpdir.name,
                dir_name])

    return (os.path.join(tmpdir.name, "{}.tar.gz".format(ping_vnfd.name)),
            os.path.join(tmpdir.name, "{}.tar.gz".format(pong_vnfd.name)),
            os.path.join(tmpdir.name, "{}.tar.gz".format(ping_pong_nsd.name)))
+
+
@pytest.mark.setup('pingpong')
@pytest.mark.depends('launchpad')
@pytest.mark.usefixtures('cloud_account')
@pytest.mark.incremental
class TestPingPongStart(object):
    """A brief overview of the steps performed.
    1. Generate & on-board new descriptors
    2. Start & stop the ping pong NSR
    3. Update the exiting descriptor files.
    4. Start the ping pong NSR.

    """


    def test_onboard_descriptors(
            self,
            logger,
            vnfd_proxy,
            nsd_proxy,
            mgmt_session,
            scheme,
            cert,
            ping_pong_records):
        """Generates & On-boards the descriptors.
        """
        temp_dirs = []
        catalog = vnfd_proxy.get_config('/vnfd-catalog')
        endpoint = "upload"

        # Unpack the records up front: the "update" branch below mutates
        # these objects, and the original code referenced them before they
        # were bound (a latent NameError, hidden only because endpoint is
        # hard-coded to "upload").
        ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records

        """
        This upload routine can get called multiples times for upload API,
        depending on the combinations of 'cloud_account' & 'endpoint'
        fixtures. Since the records are cached at module level, we might end up
        uploading the same uuids multiple times, thus causing errors. So a
        simple work-around will be to skip the records when they are uploaded
        for the second time.
        """
        def onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file):
            # On-board VNFDs and verify both land in the catalog.
            for file_name in [ping_vnfd_file, pong_vnfd_file]:
                onboard_descriptor(
                        mgmt_session.host,
                        file_name,
                        logger,
                        endpoint,
                        scheme,
                        cert)

            catalog = vnfd_proxy.get_config('/vnfd-catalog')
            vnfds = catalog.vnfd
            assert len(vnfds) == 2, "There should two vnfds"
            assert "ping_vnfd" in [vnfds[0].name, vnfds[1].name]
            assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name]


        def delete_vnfds():
            # Remove every VNFD from the catalog and verify the deletion.
            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
            for vnfd_record in vnfds.vnfd:
                xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
                vnfd_proxy.delete_config(xpath)

            time.sleep(5)
            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
            assert vnfds is None or len(vnfds.vnfd) == 0


        # Skip re-uploading if this module already populated the catalog.
        if catalog is not None and len(catalog.vnfd) == 2 and endpoint == "upload":
            return

        if endpoint == "update":
            for vnfd_record in [ping_vnfd, pong_vnfd]:
                vnfd_record.descriptor.vnfd[0].description += "_update"
            ping_pong_nsd.descriptor.nsd[0].description += "_update"

        tmpdir2 = tempfile.TemporaryDirectory()
        temp_dirs.append(tmpdir2)
        ping_pong.generate_ping_pong_descriptors(pingcount=1,
                                                  write_to_file=True,
                                                  out_dir=tmpdir2.name,
                                                  ping_fmt='json',
                                                  pong_fmt='xml',
                                                  )

        # On-board VNFDs without image
        ping_vnfd_file = os.path.join(tmpdir2.name, 'ping_vnfd/vnfd/ping_vnfd.json')
        pong_vnfd_file = os.path.join(tmpdir2.name, 'pong_vnfd/vnfd/pong_vnfd.xml')
        onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file)

        delete_vnfds()

        tmpdir = tempfile.TemporaryDirectory()
        temp_dirs.append(tmpdir)

        ping_vnfd_file, pong_vnfd_file, pingpong_nsd_file = \
            generate_tar_files(tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd)

        # On-board VNFDs with image
        onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file)

        # On-board NSD
        onboard_descriptor(
                mgmt_session.host,
                pingpong_nsd_file,
                logger,
                endpoint,
                scheme,
                cert)

        catalog = nsd_proxy.get_config('/nsd-catalog')
        nsds = catalog.nsd
        assert len(nsds) == 1, "There should only be a single nsd"
        assert nsds[0].name == "ping_pong_nsd"

        # Temp directory cleanup
#         for temp_dir in temp_dirs:
#             temp_dir.cleanup()

    def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account):
        """Instantiate the ping_pong NS from the onboarded NSD and verify the
        configured input parameters appear in the running configuration.
        """

        def verify_input_parameters(running_config, config_param):
            """
            Verify the configured parameter set against the running configuration
            """
            for run_input_param in running_config.input_parameter:
                if (run_input_param.xpath == config_param.xpath and
                    run_input_param.value == config_param.value):
                    return True

            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
                                                                           config_param.value,
                                                                           running_config.input_parameter))

        catalog = nsd_proxy.get_config('/nsd-catalog')
        nsd = catalog.nsd[0]

        # Override the NSD vendor leaf via an input parameter.
        input_parameters = []
        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
        descr_value = "automation"
        in_param_id = str(uuid.uuid4())

        input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
                                                                xpath=descr_xpath,
                                                                value=descr_value)

        input_parameters.append(input_param_1)

        nsr = create_nsr(nsd, input_parameters, cloud_account.name)

        logger.info("Instantiating the Network Service")
        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)

        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
        assert nsr_opdata is not None

        # Verify the input parameter configuration
        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
        for input_param in input_parameters:
            verify_input_parameters(running_config, input_param)

    def test_wait_for_pingpong_started(self, rwnsr_proxy):
        """Wait for every NSR's operational-status to reach 'running'."""
        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
        nsrs = nsr_opdata.nsr

        for nsr in nsrs:
            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(
                    nsr.ns_instance_config_ref)
            rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=180)

    def test_wait_for_pingpong_configured(self, rwnsr_proxy):
        """Wait for every NSR's config-status to reach 'configured'."""
        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
        nsrs = nsr_opdata.nsr

        for nsr in nsrs:
            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(
                    nsr.ns_instance_config_ref)
            rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
+
+
+@pytest.mark.feature("update-api")
+@pytest.mark.depends('pingpong')
+@pytest.mark.usefixtures('cloud_account')
+@pytest.mark.incremental
+class TestUpdateNsr(object):
+    def test_stop_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger):
+        """Terminate the currently running NSR instance before updating the descriptor files"""
+        terminate_nsrs(rwvnfr_proxy, rwnsr_proxy, logger)
+
+    def test_onboard_descriptors(
+            self,
+            logger,
+            vnfd_proxy,
+            nsd_proxy,
+            mgmt_session,
+            scheme,
+            cert,
+            ping_pong_records):
+        """Generates & On-boards the descriptors.
+        """
+        temp_dirs = []
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        endpoint = "update"
+        ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records                
+
+        """
+        This upload routine can get called multiples times for upload API,
+        depending on the combinations of 'cloud_account' & 'endpoint'
+        fixtures. Since the records are cached at module level, we might end up
+        uploading the same uuids multiple times, thus causing errors. So a
+        simple work-around will be to skip the records when they are uploaded
+        for the second time.
+        """
+        def onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file):
+            # On-board VNFDs
+            for file_name in [ping_vnfd_file, pong_vnfd_file]:
+                onboard_descriptor(
+                        mgmt_session.host,
+                        file_name,
+                        logger,
+                        endpoint,
+                        scheme,
+                        cert)
+
+            catalog = vnfd_proxy.get_config('/vnfd-catalog')
+            vnfds = catalog.vnfd
+
+            assert len(vnfds) == 2, "There should two vnfds"
+            assert "ping_vnfd" in [vnfds[0].name, vnfds[1].name]
+            assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+        def delete_nsds():
+            nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+            for nsd_record in nsds.nsd:
+                xpath = "/nsd-catalog/nsd[id='{}']".format(nsd_record.id)
+                nsd_proxy.delete_config(xpath)
+
+            time.sleep(5)
+            nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+            assert nsds is None or len(nsds.nsd) == 0
+        delete_nsds()
+
+        def delete_vnfds():
+            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            for vnfd_record in vnfds.vnfd:
+                xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+                vnfd_proxy.delete_config(xpath)
+
+            time.sleep(5)
+            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            assert vnfds is None or len(vnfds.vnfd) == 0
+
+        delete_vnfds()
+
+        if catalog is not None and len(catalog.vnfd) == 2 and endpoint == "upload":
+            return
+
+        ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records
+
+        if endpoint == "update":
+            for vnfd_record in [ping_vnfd, pong_vnfd]:
+                vnfd_record.descriptor.vnfd[0].description += "_update"
+            ping_pong_nsd.descriptor.nsd[0].description += "_update"
+
+        tmpdir2 = tempfile.TemporaryDirectory()
+        temp_dirs.append(tmpdir2)
+        ping_pong.generate_ping_pong_descriptors(pingcount=1,
+                                                  write_to_file=True,
+                                                  out_dir=tmpdir2.name,
+                                                  ping_fmt='json',
+                                                  pong_fmt='xml',
+                                                  )
+
+        # On-board VNFDs without image
+        ping_vnfd_file = os.path.join(tmpdir2.name, 'ping_vnfd/vnfd/ping_vnfd.json')
+        pong_vnfd_file = os.path.join(tmpdir2.name, 'pong_vnfd/vnfd/pong_vnfd.xml')
+        onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file)
+        delete_vnfds()
+
+        tmpdir = tempfile.TemporaryDirectory()
+        temp_dirs.append(tmpdir)
+
+        ping_vnfd_file, pong_vnfd_file, pingpong_nsd_file = \
+            generate_tar_files(tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd)
+
+        # On-board VNFDs with image
+        onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file)
+
+
+        # On-board NSD
+        onboard_descriptor(
+                mgmt_session.host,
+                pingpong_nsd_file,
+                logger,
+                endpoint,
+                scheme,
+                cert)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        assert nsds[0].name == "ping_pong_nsd"
+
+        # Temp directory cleanup
+#         for temp_dir in temp_dirs:
+#             temp_dir.cleanup()
+
+    def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account):
+        def verify_input_parameters(running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            for run_input_param in running_config.input_parameter:
+                if (run_input_param.xpath == config_param.xpath and
+                    run_input_param.value == config_param.value):
+                    return True
+
+            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
+                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                           config_param.value,
+                                                                           running_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        input_parameters = []
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+        descr_value = "automation"
+        in_param_id = str(uuid.uuid4())
+
+        input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd, input_parameters, cloud_account.name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+        assert nsr_opdata is not None
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+    def test_wait_for_pingpong_started(self, rwnsr_proxy):
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(
+                    nsr.ns_instance_config_ref)
+            rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=180)
+
+    def test_wait_for_pingpong_configured(self, rwnsr_proxy):
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(
+                    nsr.ns_instance_config_ref)
+            rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
+
+
+@pytest.mark.teardown('pingpong')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestPingPongTeardown(object):
+    def test_terminate_nsrs(self, rwvnfr_proxy, rwnsr_proxy, logger):
+        """
+        Terminate the instance and check if the record is deleted.
+
+        Asserts:
+        1. NSR record is deleted from instance-config.
+
+        """
+        logger.debug("Terminating Ping Pong NSR")
+        terminate_nsrs(rwvnfr_proxy, rwnsr_proxy, logger)
+
+    def test_delete_records(self, nsd_proxy, vnfd_proxy):
+        """Delete the NSD & VNFD records
+
+        Asserts:
+            The records are deleted.
+        """
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            nsd_proxy.delete_config(xpath)
+
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py
new file mode 100644
index 0000000..ff8fa96
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/01/04
+#
+
+import pytest
+import rift.vcs.vcs
+import time
+
+import gi
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+def test_launchpad_longevity(mgmt_session, mgmt_domain_name, rwnsr_proxy):
+    """Single longevity pass: settle, verify the system is up, then assert
+    every NSR still reports operational-status 'running'."""
+    # Let the system settle before sampling status.
+    time.sleep(60)
+    rift.vcs.vcs.wait_until_system_started(mgmt_session)
+
+    nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+    for nsr in nsr_opdata.nsr:
+        xpath = ("/ns-instance-opdata"
+                 "/nsr[ns-instance-config-ref='%s']"
+                 "/operational-status") % (nsr.ns_instance_config_ref)
+        operational_status = rwnsr_proxy.get(xpath)
+        assert operational_status == 'running'
+
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py
new file mode 100644
index 0000000..920bd70
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py
@@ -0,0 +1,487 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import socket
+import subprocess
+import time
+
+import pytest
+
+import gi
+import re
+gi.require_version('RwNsrYang', '1.0')
+from gi.repository import (
+        NsdYang,
+        RwBaseYang,
+        RwConmanYang,
+        RwNsrYang,
+        RwNsdYang,
+        RwVcsYang,
+        RwVlrYang,
+        RwVnfdYang,
+        RwVnfrYang,
+        VlrYang,
+        VnfrYang,
+        )
+import rift.auto.session
+import rift.mano.examples.ping_pong_nsd as ping_pong
+
+
+@pytest.fixture(scope='module')
+def proxy(request, mgmt_session):
+    # Returns the management session's proxy factory: call proxy(SomeYangModule)
+    # to obtain a proxy bound to that Yang module.
+    return mgmt_session.proxy
+
+@pytest.fixture(scope='session')
+def updated_ping_pong_records(ping_pong_factory):
+    '''Fixture returns a newly created set of ping and pong descriptors
+    for the create_update tests
+    '''
+    # Session scope: the same regenerated descriptors are shared by every
+    # create_update test in this run.
+    return ping_pong_factory.generate_descriptors()
+
+def yield_vnfd_vnfr_pairs(proxy, nsr=None):
+    """
+    Yields tuples of vnfd & vnfr entries.
+
+    Args:
+        proxy (callable): Launchpad proxy
+        nsr (optional): If specified, only the vnfr & vnfd records of the NSR
+                are returned
+
+    Yields:
+        Tuple: VNFD and its corresponding VNFR entry
+    """
+    def get_vnfd(vnfd_id):
+        # Look up a single VNFD record by id.
+        xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_id)
+        return proxy(RwVnfdYang).get(xpath)
+
+    # NOTE: the name `vnfr` is reused — first as the catalog xpath string,
+    # then rebound to each record inside the loop.
+    vnfr = "/vnfr-catalog/vnfr"
+    vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)
+    for vnfr in vnfrs.vnfr:
+
+        if nsr:
+            # Restrict to VNFRs that are constituents of the given NSR.
+            const_vnfr_ids = [const_vnfr.vnfr_id for const_vnfr in nsr.constituent_vnfr_ref]
+            if vnfr.id not in const_vnfr_ids:
+                continue
+
+        vnfd = get_vnfd(vnfr.vnfd_ref)
+        yield vnfd, vnfr
+
+
+def yield_nsd_nsr_pairs(proxy):
+    """Yields tuples of NSD & NSR
+
+    Args:
+        proxy (callable): Launchpad proxy
+
+    Yields:
+        Tuple: NSD and its corresponding NSR record
+    """
+
+    for nsr_cfg, nsr in yield_nsrc_nsro_pairs(proxy):
+        nsd_path = "/nsd-catalog/nsd[id='{}']".format(
+                nsr_cfg.nsd.id)
+        nsd = proxy(RwNsdYang).get_config(nsd_path)
+
+        yield nsd, nsr
+
+def yield_nsrc_nsro_pairs(proxy):
+    """Yields tuples of NSR Config & NSR Opdata pairs
+
+    Args:
+        proxy (callable): Launchpad proxy
+
+    Yields:
+        Tuple: NSR config and its corresponding NSR op record
+    """
+    nsr = "/ns-instance-opdata/nsr"
+    nsrs = proxy(RwNsrYang).get(nsr, list_obj=True)
+    for nsr in nsrs.nsr:
+        nsr_cfg_path = "/ns-instance-config/nsr[id='{}']".format(
+                nsr.ns_instance_config_ref)
+        nsr_cfg = proxy(RwNsrYang).get_config(nsr_cfg_path)
+
+        yield nsr_cfg, nsr
+
+
+def assert_records(proxy):
+    """Verifies if the NSR & VNFR records are created
+    """
+    ns_tuple = list(yield_nsd_nsr_pairs(proxy))
+    assert len(ns_tuple) == 1
+
+    vnf_tuple = list(yield_vnfd_vnfr_pairs(proxy))
+    assert len(vnf_tuple) == 2
+
+
+@pytest.mark.depends('nsr')
+@pytest.mark.setup('records')
+@pytest.mark.usefixtures('recover_tasklet')
+@pytest.mark.incremental
+class TestRecordsData(object):
+    def is_valid_ip(self, address):
+        """Verifies if it is a valid IP and if its accessible
+
+        Args:
+            address (str): IP address
+
+        Returns:
+            boolean
+        """
+        try:
+            socket.inet_aton(address)
+        except socket.error:
+            return False
+        else:
+            return True
+
+
+    @pytest.mark.feature("recovery")
+    def test_tasklets_recovery(self, mgmt_session, proxy, recover_tasklet):
+        """Test the recovery feature of tasklets
+
+        Triggers the vcrash and waits till the system is up
+        """
+        RECOVERY = "RESTART"
+
+        def vcrash(comp):
+            rpc_ip = RwVcsYang.VCrashInput.from_dict({"instance_name": comp})
+            proxy(RwVcsYang).rpc(rpc_ip)
+
+        tasklet_name = r'^{}-.*'.format(recover_tasklet)
+
+        vcs_info = proxy(RwBaseYang).get("/vcs/info/components")
+        for comp in vcs_info.component_info:
+            if comp.recovery_action == RECOVERY and \
+               re.match(tasklet_name, comp.instance_name):
+                vcrash(comp.instance_name)
+
+        time.sleep(60)
+
+        rift.vcs.vcs.wait_until_system_started(mgmt_session)
+        # NSM tasklet takes a couple of seconds to set up the python structure
+        # so sleep and then continue with the tests.
+        time.sleep(60)
+
+    def test_records_present(self, proxy):
+        assert_records(proxy)
+
+    def test_nsd_ref_count(self, proxy):
+        """
+        Asserts
+        1. The ref count data of the NSR with the actual number of NSRs
+        """
+        nsd_ref_xpath = "/ns-instance-opdata/nsd-ref-count"
+        nsd_refs = proxy(RwNsrYang).get(nsd_ref_xpath, list_obj=True)
+
+        expected_ref_count = collections.defaultdict(int)
+        for nsd_ref in nsd_refs.nsd_ref_count:
+            expected_ref_count[nsd_ref.nsd_id_ref] = nsd_ref.instance_ref_count
+
+        actual_ref_count = collections.defaultdict(int)
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            actual_ref_count[nsd.id] += 1
+
+        assert expected_ref_count == actual_ref_count
+
+    def test_vnfd_ref_count(self, proxy):
+        """
+        Asserts
+        1. The ref count data of the VNFR with the actual number of VNFRs
+        """
+        vnfd_ref_xpath = "/vnfr-catalog/vnfd-ref-count"
+        vnfd_refs = proxy(RwVnfrYang).get(vnfd_ref_xpath, list_obj=True)
+
+        expected_ref_count = collections.defaultdict(int)
+        for vnfd_ref in vnfd_refs.vnfd_ref_count:
+            expected_ref_count[vnfd_ref.vnfd_id_ref] = vnfd_ref.instance_ref_count
+
+        actual_ref_count = collections.defaultdict(int)
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            actual_ref_count[vnfd.id] += 1
+
+        assert expected_ref_count == actual_ref_count
+
+    def test_nsr_nsd_records(self, proxy):
+        """
+        Verifies the correctness of the NSR record using its NSD counter-part
+
+        Asserts:
+        1. The count of vnfd and vnfr records
+        2. Count of connection point descriptor and records
+        """
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            assert nsd.name == nsr.nsd_name_ref
+            assert len(nsd.constituent_vnfd) == len(nsr.constituent_vnfr_ref)
+
+            assert len(nsd.vld) == len(nsr.vlr)
+            for vnfd_conn_pts, vnfr_conn_pts in zip(nsd.vld, nsr.vlr):
+                assert len(vnfd_conn_pts.vnfd_connection_point_ref) == \
+                       len(vnfr_conn_pts.vnfr_connection_point_ref)
+
+    def test_vdu_record_params(self, proxy):
+        """
+        Asserts:
+        1. If a valid floating IP has been assigned to the VM
+        2. Count of VDUD and the VDUR
+        3. Check if the VM flavor has been copied over the VDUR
+        """
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            assert vnfd.mgmt_interface.port == vnfr.mgmt_interface.port
+            assert len(vnfd.vdu) == len(vnfr.vdur)
+
+            for vdud, vdur in zip(vnfd.vdu, vnfr.vdur):
+                assert vdud.vm_flavor == vdur.vm_flavor
+                assert self.is_valid_ip(vdur.management_ip) is True
+                assert vdud.external_interface[0].vnfd_connection_point_ref == \
+                    vdur.external_interface[0].vnfd_connection_point_ref
+
+    def test_external_vl(self, proxy):
+        """
+        Asserts:
+        1. Valid IP for external connection point
+        2. A valid external network fabric
+        3. Connection point names are copied over
+        4. Count of VLD and VLR
+        5. Checks for a valid subnet ?
+        6. Checks for the operational status to be running?
+        """
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            cp_des, cp_rec = vnfd.connection_point, vnfr.connection_point
+
+            assert len(cp_des) == len(cp_rec)
+            assert cp_des[0].name == cp_rec[0].name
+            assert self.is_valid_ip(cp_rec[0].ip_address) is True
+
+            xpath = "/vlr-catalog/vlr[id='{}']".format(cp_rec[0].vlr_ref)
+            vlr = proxy(RwVlrYang).get(xpath)
+
+            assert len(vlr.network_id) > 0
+            assert len(vlr.assigned_subnet) > 0
+            ip, _ = vlr.assigned_subnet.split("/")
+            assert self.is_valid_ip(ip) is True
+            assert vlr.operational_status == "running"
+
+
+    def test_nsr_record(self, proxy):
+        """
+        Currently we only test for the components of NSR tests. Ignoring the
+        operational-events records
+
+        Asserts:
+        1. The constituent components.
+        2. Admin status of the corresponding NSD record.
+        """
+        for nsr_cfg, nsr in yield_nsrc_nsro_pairs(proxy):
+            # 1 n/w and 2 connection points
+            assert len(nsr.vlr) == 1
+            assert len(nsr.vlr[0].vnfr_connection_point_ref) == 2
+
+            assert len(nsr.constituent_vnfr_ref) == 2
+            assert nsr_cfg.admin_status == 'ENABLED'
+
+    def test_wait_for_pingpong_configured(self, proxy):
+        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        assert len(nsrs) == 1
+        current_nsr = nsrs[0]
+
+        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(current_nsr.ns_instance_config_ref)
+        proxy(RwNsrYang).wait_for(xpath, "configured", timeout=400)
+
+    def test_monitoring_params(self, proxy):
+        """
+        Asserts:
+        1. The value counter ticks?
+        2. If the meta fields are copied over
+        """
+        def mon_param_record(vnfr_id, mon_param_id):
+             return '/vnfr-catalog/vnfr[id="{}"]/monitoring-param[id="{}"]'.format(
+                    vnfr_id, mon_param_id)
+
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            for mon_des in (vnfd.monitoring_param):
+                mon_rec = mon_param_record(vnfr.id, mon_des.id)
+                mon_rec = proxy(VnfrYang).get(mon_rec)
+
+                # Meta data check
+                fields = mon_des.as_dict().keys()
+                for field in fields:
+                    assert getattr(mon_des, field) == getattr(mon_rec, field)
+                # Tick check
+                #assert mon_rec.value_integer > 0
+
+    def test_cm_nsr(self, proxy):
+        """
+        Asserts:
+            1. The ID of the NSR in cm-state
+            2. Name of the cm-nsr
+            3. The vnfr component's count
+            4. State of the cm-nsr
+        """
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsr.ns_instance_config_ref)
+            con_data = proxy(RwConmanYang).get(con_nsr_xpath)
+
+            assert con_data.name == "ping_pong_nsd"
+            assert len(con_data.cm_vnfr) == 2
+
+            state_path = con_nsr_xpath + "/state"
+            proxy(RwConmanYang).wait_for(state_path, 'ready', timeout=120)
+
+    def test_cm_vnfr(self, proxy):
+        """
+        Asserts:
+            1. The ID of Vnfr in cm-state
+            2. Name of the vnfr
+            3. State of the VNFR
+            4. Checks for a reachable IP in mgmt_interface
+            5. Basic checks for connection point and cfg_location.
+        """
+        def is_reachable(ip, timeout=10):
+            rc = subprocess.call(["ping", "-c1", "-w", str(timeout), ip])
+            if rc == 0:
+                return True
+            return False
+
+        nsr_cfg, _ = list(yield_nsrc_nsro_pairs(proxy))[0]
+        con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsr_cfg.id)
+
+        for _, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id='{}']".format(vnfr.id)
+            con_data = proxy(RwConmanYang).get(con_vnfr_path)
+
+            assert con_data is not None
+
+            state_path = con_vnfr_path + "/state"
+            proxy(RwConmanYang).wait_for(state_path, 'ready', timeout=120)
+
+            con_data = proxy(RwConmanYang).get(con_vnfr_path)
+            assert is_reachable(con_data.mgmt_interface.ip_address) is True
+
+            assert len(con_data.connection_point) == 1
+            connection_point = con_data.connection_point[0]
+            assert connection_point.name == vnfr.connection_point[0].name
+            assert connection_point.ip_address == vnfr.connection_point[0].ip_address
+
+            assert con_data.cfg_location is not None
+
+@pytest.mark.depends('nsr')
+@pytest.mark.setup('nfvi')
+@pytest.mark.incremental
+class TestNfviMetrics(object):
+    """Checks NSR-level NFVI metrics against values computed from descriptors.
+    The metrics test itself is currently skipped (deprecated data source)."""
+
+    def test_records_present(self, proxy):
+        # Sanity: exactly one NSR and two VNFRs exist.
+        assert_records(proxy)
+
+    @pytest.mark.skipif(True, reason='NFVI metrics collected from NSR are deprecated, test needs to be updated to collected metrics from VNFRs')
+    def test_nfvi_metrics(self, proxy):
+        """
+        Verify the NFVI metrics
+
+        Asserts:
+            1. Computed metrics, such as memory, cpu, storage and ports, match
+               with the metrics in NSR record. The metrics are computed from the
+               descriptor records.
+            2. Check if the 'utilization' field has a valid value (> 0) and matches
+               with the 'used' field, if available.
+        """
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            nfvi_metrics = nsr.nfvi_metrics
+            computed_metrics = collections.defaultdict(int)
+
+            # Get the constituent VNF records.
+            for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy, nsr):
+                # NOTE(review): only vdu[0] of each VNFD is counted — assumes
+                # single-VDU VNFs; confirm if multi-VDU descriptors appear.
+                vdu = vnfd.vdu[0]
+                vm_spec = vdu.vm_flavor
+                computed_metrics['vm'] += 1
+                computed_metrics['memory'] += vm_spec.memory_mb * (10**6)
+                computed_metrics['storage'] += vm_spec.storage_gb * (10**9)
+                computed_metrics['vcpu'] += vm_spec.vcpu_count
+                computed_metrics['external_ports'] += len(vnfd.connection_point)
+                computed_metrics['internal_ports'] += len(vdu.internal_connection_point)
+
+            assert nfvi_metrics.vm.active_vm == computed_metrics['vm']
+
+            # Availability checks
+            for metric_name in computed_metrics:
+                metric_data = getattr(nfvi_metrics, metric_name)
+                total_available = getattr(metric_data, 'total', None)
+
+                if total_available is not None:
+                    assert computed_metrics[metric_name] == total_available
+
+            # Utilization checks
+            for metric_name in ['memory', 'storage', 'vcpu']:
+                metric_data = getattr(nfvi_metrics, metric_name)
+
+                utilization = metric_data.utilization
+                # assert utilization > 0
+
+                # If used field is available, check if it matches with utilization!
+                total = metric_data.total
+                used = getattr(metric_data, 'used', None)
+                if used is not None:
+                    assert total > 0
+                    computed_utilization = round((used/total) * 100, 2)
+                    assert abs(computed_utilization - utilization) <= 0.1
+
+
+
+@pytest.mark.depends('nfvi')
+@pytest.mark.incremental
+class TestRecordsDescriptors:
+    """Create/update round-trips for VNFD and NSD catalog entries."""
+    def test_create_update_vnfd(self, proxy, updated_ping_pong_records):
+        """
+        Verify VNFD related operations
+
+        Asserts:
+            If a VNFD record is created
+        """
+        ping_vnfd, pong_vnfd, _ = updated_ping_pong_records
+        vnfdproxy = proxy(RwVnfdYang)
+
+        for vnfd_record in [ping_vnfd, pong_vnfd]:
+            xpath = "/vnfd-catalog/vnfd"
+            vnfdproxy.create_config(xpath, vnfd_record.vnfd)
+
+            # NOTE(review): assumes the record wrapper exposes the descriptor
+            # id as `.id` (mirroring `.vnfd`) — confirm against the factory.
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd = vnfdproxy.get(xpath)
+            assert vnfd.id == vnfd_record.id
+
+            # Replace with the same payload to exercise the update path.
+            vnfdproxy.replace_config(xpath, vnfd_record.vnfd)
+
+    def test_create_update_nsd(self, proxy, updated_ping_pong_records):
+        """
+        Verify NSD related operations
+
+        Asserts:
+            If NSD record was created
+        """
+        _, _, ping_pong_nsd = updated_ping_pong_records
+        nsdproxy = proxy(NsdYang)
+
+        xpath = "/nsd-catalog/nsd"
+        nsdproxy.create_config(xpath, ping_pong_nsd.descriptor)
+
+        xpath = "/nsd-catalog/nsd[id='{}']".format(ping_pong_nsd.id)
+        nsd = nsdproxy.get(xpath)
+        assert nsd.id == ping_pong_nsd.id
+
+        # Replace with the same payload to exercise the update path.
+        nsdproxy.replace_config(xpath, ping_pong_nsd.descriptor)
+
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py
new file mode 100644
index 0000000..0878db7
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py
@@ -0,0 +1,192 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_scaling.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 07/13/2016
+@brief Pingpong scaling system test
+"""
+
+import os
+import pytest
+import subprocess
+import sys
+import time
+import uuid
+
+import rift.auto.mano
+import rift.auto.session
+import rift.auto.descriptor
+
+from gi.repository import (
+    NsrYang,
+    NsdYang,
+    VnfrYang,
+    RwNsrYang,
+    RwNsdYang,
+    RwVnfrYang,
+)
+
+@pytest.mark.setup('pingpong_nsd')
+@pytest.mark.depends('launchpad')
+class TestSetupPingpongNsd(object):
+    """Setup phase: onboard ping/pong descriptors and install sysstat."""
+
+    def test_onboard(self, mgmt_session, descriptors):
+        """Onboard every descriptor package onto the launchpad host."""
+        for descriptor in descriptors:
+            rift.auto.descriptor.onboard(mgmt_session.host, descriptor)
+
+    def test_install_sar(self, mgmt_session):
+        """Install sysstat (provides `sar`) on the launchpad host over ssh,
+        so the scaling test can sample system performance later."""
+        install_cmd = 'ssh {mgmt_ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sudo yum install sysstat --assumeyes'.format(
+                mgmt_ip=mgmt_session.host,
+        )
+        subprocess.check_call(install_cmd, shell=True)
+
+
+# Parametrized per test function: the scaling test runs once per target count.
+@pytest.fixture(scope='function', params=[5,10,15,20,25])
+def service_count(request):
+    '''Fixture representing the number of services to test'''
+    return request.param
+
+@pytest.mark.depends('pingpong_nsd')
+class TestScaling(object):
+    """Scale the number of ping/pong network services up through the
+    `service_count` fixture params and record launchpad performance via sar."""
+
+    @pytest.mark.preserve_fixture_order
+    def test_scaling(self, mgmt_session, cloud_account_name, service_count):
+
+        # Best-effort: tries to bring the running NS count up to
+        # desired_service_count within max_attempts passes.
+        # NOTE(review): does not assert the target count was actually reached,
+        # and on retry it re-waits NSRs that already reached running/configured,
+        # incrementing service_count for them again — confirm this is intended.
+        def start_services(mgmt_session, desired_service_count, max_attempts=3): 
+            catalog = mgmt_session.proxy(NsdYang).get_config('/nsd-catalog')
+            nsd = catalog.nsd[0]
+            
+            # Start from however many services already exist.
+            nsr_path = "/ns-instance-config"
+            nsr = mgmt_session.proxy(RwNsrYang).get_config(nsr_path)
+            service_count = len(nsr.nsr)
+
+            attempts = 0
+            while attempts < max_attempts and service_count < desired_service_count:
+                attempts += 1
+
+                # Instantiate one NSR per missing service, each with a
+                # unique name derived from a random uuid.
+                for count in range(service_count, desired_service_count):
+                    nsr = rift.auto.descriptor.create_nsr(
+                        cloud_account_name,
+                        "pingpong_%s" % str(uuid.uuid4().hex[:10]),
+                        nsd.id)
+                    mgmt_session.proxy(RwNsrYang).create_config('/ns-instance-config/nsr', nsr)
+
+                # Wait for each NSR to reach running/configured; delete any
+                # that fail so the next attempt can replace them.
+                ns_instance_opdata = mgmt_session.proxy(RwNsrYang).get('/ns-instance-opdata')
+                for nsr in ns_instance_opdata.nsr:
+                    try:
+                        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
+                        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=180)
+                        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(nsr.ns_instance_config_ref)
+                        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
+                        service_count += 1
+                    except rift.auto.session.ProxyWaitForError:
+                        mgmt_session.proxy(RwNsrYang).delete_config("/ns-instance-config/nsr[id='{}']".format(nsr.ns_instance_config_ref))
+
+        # Run sar on the launchpad host and append its output, tagged with the
+        # current service count, to the scaling log consumed by sarplot later.
+        def monitor_launchpad_performance(service_count, interval=30, samples=1):
+            sar_cmd = "ssh {mgmt_ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sar -A {interval} {samples}".format(
+                    mgmt_ip=mgmt_session.host,
+                    interval=interval,
+                    samples=samples
+            )
+            output = subprocess.check_output(sar_cmd, shell=True, stderr=subprocess.STDOUT)
+            outfile = '{rift_artifacts}/scaling_{task_id}.log'.format(
+                    rift_artifacts=os.environ.get('RIFT_ARTIFACTS'),
+                    task_id=os.environ.get('AUTO_TASK_ID')
+            )
+            with open(outfile, 'a') as fh:
+                message = '''
+== SCALING RESULTS : {service_count} Network Services ==
+{output}               
+                '''.format(service_count=service_count, output=output.decode())
+                fh.write(message)
+
+        start_services(mgmt_session, service_count)
+        monitor_launchpad_performance(service_count, interval=30, samples=1)
+
+@pytest.mark.depends('pingpong_nsd')
+@pytest.mark.teardown('pingpong_nsd')
+class TestTeardownPingpongNsr(object):
+    """Teardown phase: delete all NSRs and render performance plots from the
+    sar samples collected during TestScaling."""
+
+    def test_teardown_nsr(self, mgmt_session):
+        """Delete every configured NSR and verify all VNFRs disappear."""
+
+        ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config('/ns-instance-config')
+        for nsr in ns_instance_config.nsr:
+            mgmt_session.proxy(RwNsrYang).delete_config("/ns-instance-config/nsr[id='{}']".format(nsr.id))
+
+        # Fixed grace period for VNFR cleanup to propagate before checking.
+        time.sleep(60)
+        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get('/vnfr-catalog')
+        assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
+    def test_generate_plots(self):
+        """Run sarplot.py over the scaling log to produce CPU, memory and
+        interface utilization/error plots, one png per command below."""
+        plot_commands = [
+            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
+                    '--plot "{rift_artifacts}/scaling_cpu_{task_id}.png" '
+                    '--title "CPU Utilization by network service count" '
+                    '--keys CPU '
+                    '--fields %usr,%idle,%sys '
+                    '--key-filter CPU:all '
+                    '--ylabel "CPU Utilization %" '
+                    '--xlabel "Network Service Count" '
+                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
+            ),
+            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
+                    '--plot "{rift_artifacts}/scaling_mem_{task_id}.png" '
+                    '--title "Memory Utilization by network service count" '
+                    '--fields kbmemfree,kbmemused,kbbuffers,kbcached,kbcommit,kbactive,kbinact,kbdirty '
+                    '--ylabel "Memory Utilization" '
+                    '--xlabel "Network Service Count" '
+                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
+            ),
+            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
+                    '--plot "{rift_artifacts}/scaling_mempct_{task_id}.png" '
+                    '--title "Memory Utilization by network service count" '
+                    '--fields %memused,%commit '
+                    '--ylabel "Memory Utilization %" '
+                    '--xlabel "Network Service Count" '
+                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
+            ),
+            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
+                    '--plot "{rift_artifacts}/scaling_iface_{task_id}.png" '
+                    '--title "Interface Utilization by network service count" '
+                    '--keys IFACE '
+                    '--fields rxpck/s,txpck/s,rxkB/s,txkB/s,rxcmp/s,txcmp/s,rxmcst/s '
+                    '--key-filter IFACE:eth0 '
+                    '--ylabel "Interface Utilization" '
+                    '--xlabel "Network Service Count" '
+                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
+            ),
+            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
+                    '--plot "{rift_artifacts}/scaling_iface_err_{task_id}.png" '
+                    '--title "Interface Errors by network service count" '
+                    '--keys IFACE '
+                    '--fields rxerr/s,txerr/s,coll/s,rxdrop/s,txdrop/s,txcarr/s,rxfram/s,rxfifo/s,txfifo/s '
+                    '--key-filter IFACE:eth0 '
+                    '--ylabel "Interface Errors" '
+                    '--xlabel "Network Service Count" '
+                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
+            ),
+        ]
+
+        for cmd in plot_commands:
+            subprocess.check_call(
+                    cmd.format(
+                        rift_install=os.environ.get('RIFT_INSTALL'),
+                        rift_artifacts=os.environ.get('RIFT_ARTIFACTS'),
+                        task_id=os.environ.get('AUTO_TASK_ID')
+                    ),
+                    shell=True
+            )
+
+
diff --git a/rwlaunchpad/ra/pytest/ns/test_onboard.py b/rwlaunchpad/ra/pytest/ns/test_onboard.py
new file mode 100644
index 0000000..5951ce8
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/test_onboard.py
@@ -0,0 +1,408 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_onboard.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@brief Onboard descriptors
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import shutil
+import subprocess
+import time
+import uuid
+
+import rift.auto.mano
+import rift.auto.session
+
+import gi
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+
+from gi.repository import (
+    RwcalYang,
+    NsdYang,
+    RwNsrYang,
+    RwVnfrYang,
+    NsrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+# Module-scoped proxy fixtures: one management-session proxy per YANG module,
+# shared by every test in this file.
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+@pytest.fixture(scope='module')
+def rwvnfr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfrYang)
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+
+# REST endpoint used by the descriptor upload helpers below.
+@pytest.fixture(scope="module")
+def endpoint():
+    return "upload"
+
+def create_nsr(nsd, input_param_list, cloud_account_name):
+    """
+    Create the NSR record object
+
+    Arguments:
+         nsd              -  NSD
+         input_param_list - list of input-parameter objects
+         cloud_account_name - name of the cloud account the NS is
+                              instantiated on
+
+    Return:
+         NSR object
+    """
+    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+
+    # Random id; the human-readable name is derived from it.
+    nsr.id = str(uuid.uuid4())
+    nsr.name = rift.auto.mano.resource_name(nsr.id)
+    nsr.short_name = "nsr_short_name"
+    nsr.description = "This is a description"
+    nsr.nsd.from_dict(nsd.as_dict())
+    nsr.admin_status = "ENABLED"
+    nsr.input_parameter.extend(input_param_list)
+    nsr.cloud_account = cloud_account_name
+
+    return nsr
+
+
+def upload_descriptor(
+        logger,
+        descriptor_file,
+        scheme,
+        cert,
+        host="127.0.0.1",
+        endpoint="upload"):
+    """Upload a descriptor package via curl and return the transaction id.
+
+    Args:
+        logger: Logger instance.
+        descriptor_file (str): Path of the package to upload.
+        scheme (str): 'http' or 'https' (used verbatim in the URL).
+        cert (tuple): (cert_path, key_path) passed to curl --cert/--key.
+        host (str): Launchpad IP.
+        endpoint (str): Upload API endpoint name.
+
+    Returns:
+        The "transaction_id" field of the JSON reply.
+    """
+    curl_cmd = ('curl --cert {cert} --key {key} -F "descriptor=@{file}" -k '
+                '{scheme}://{host}:4567/api/{endpoint}'.format(
+            cert=cert[0],
+            key=cert[1],
+            scheme=scheme,
+            endpoint=endpoint,
+            file=descriptor_file,
+            host=host,
+            ))
+
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    """Raised when a descriptor onboard transaction fails or ends in a
+    non-success state."""
+    pass
+
+
+def wait_onboard_transaction_finished(
+        logger,
+        transaction_id,
+        scheme,
+        cert,
+        timeout=600,
+        host="127.0.0.1",
+        endpoint="upload"):
+    """Poll the upload-state REST endpoint once a second until the transaction
+    reports "success", raising DescriptorOnboardError on any failure state or
+    on timeout.
+
+    NOTE(review): if called with timeout <= 0 the loop body never runs and the
+    final `state["status"]` check raises NameError instead — confirm callers
+    always pass a positive timeout.
+    """
+
+    logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
+    uri = '%s://%s:4567/api/%s/%s/state' % (scheme, host, endpoint, transaction_id)
+
+    elapsed = 0
+    start = time.time()
+    while elapsed < timeout:
+        # verify=False: launchpad uses a self-signed certificate.
+        reply = requests.get(uri, cert=cert, verify=False)
+        state = reply.json()
+        if state["status"] == "success":
+            break
+        if state["status"] != "pending":
+            raise DescriptorOnboardError(state)
+
+        time.sleep(1)
+        elapsed = time.time() - start
+
+
+    # Reached either by break (success) or by timing out while still pending.
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+    logger.info("Descriptor onboard was successful")
+
+
+def onboard_descriptor(host, file_name, logger, endpoint, scheme, cert):
+    """On-board/update the descriptor.
+
+    Args:
+        host (str): Launchpad IP
+        file_name (str): Full file path.
+        logger: Logger instance
+        endpoint (str): endpoint to be used for the upload operation.
+        scheme (str): 'http' or 'https', forwarded to the upload helpers.
+        cert (tuple): (cert_path, key_path) client certificate pair.
+
+    """
+    # Upload, then block until the onboard transaction completes
+    # (raises DescriptorOnboardError on failure).
+    logger.info("Onboarding package: %s", file_name)
+    trans_id = upload_descriptor(
+            logger,
+            file_name,
+            scheme,
+            cert,
+            host=host,
+            endpoint=endpoint)
+    wait_onboard_transaction_finished(
+        logger,
+        trans_id,
+        scheme,
+        cert,
+        host=host,
+        endpoint=endpoint)
+
+def terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger, wait_after_kill=True):
+    """
+    Terminate the instance and check if the record is deleted.
+
+    Arguments:
+        rwvnfr_proxy - proxy for RwVnfrYang
+        rwnsr_proxy  - proxy for RwNsrYang
+        logger       - Logger instance
+        wait_after_kill - sleep 30s after deletion (real clouds) instead
+                          of 5s (mock)
+
+    Asserts:
+    1. NSR record is deleted from instance-config.
+
+    """
+    logger.debug("Terminating NSRs")
+
+    nsr_path = "/ns-instance-config"
+    nsr = rwnsr_proxy.get_config(nsr_path)
+    nsrs = nsr.nsr
+
+    # Delete every configured NSR, remembering each xpath for verification.
+    xpaths = []
+    for nsr in nsrs:
+        xpath = "/ns-instance-config/nsr[id='{}']".format(nsr.id)
+        rwnsr_proxy.delete_config(xpath)
+        xpaths.append(xpath)
+
+    if wait_after_kill:
+        time.sleep(30)
+    else:
+        time.sleep(5)
+
+    # Each deleted NSR must no longer be readable from the config.
+    for xpath in xpaths:
+        nsr = rwnsr_proxy.get_config(xpath)
+        assert nsr is None
+
+    # Get the ns-instance-config
+    # NOTE(review): ns_instance_config is fetched but never used below.
+    ns_instance_config = rwnsr_proxy.get_config("/ns-instance-config")
+
+    # Termination tests
+    vnfr = "/vnfr-catalog/vnfr"
+    vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
+    assert vnfrs is None or len(vnfrs.vnfr) == 0
+
+    # nsr = "/ns-instance-opdata/nsr"
+    # nsrs = rwnsr_proxy.get(nsr, list_obj=True)
+    # assert len(nsrs.nsr) == 0
+
+
+
+@pytest.mark.setup('nsr')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestNsrStart(object):
+    """A brief overview of the steps performed.
+    1. Generate & on-board new descriptors
+    2. Start the NSR 
+    """
+
+    def test_upload_descriptors(
+            self,
+            logger,
+            vnfd_proxy,
+            nsd_proxy,
+            mgmt_session,
+            scheme,
+            cert,
+            descriptors
+        ):
+        """Generates & On-boards the descriptors.
+
+        Expects `descriptors` to list the VNFD packages first and the single
+        NSD package last; verifies the catalogs reflect exactly those counts.
+        """
+        endpoint = "upload"
+
+        for file_name in descriptors:
+            onboard_descriptor(
+                    mgmt_session.host,
+                    file_name,
+                    logger,
+                    endpoint,
+                    scheme,
+                    cert)
+
+        # NOTE(review): descriptor_nsd is never used below.
+        descriptor_vnfds, descriptor_nsd = descriptors[:-1], descriptors[-1]
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        actual_vnfds = catalog.vnfd
+        assert len(actual_vnfds) == len(descriptor_vnfds), \
+                "There should {} vnfds".format(len(descriptor_vnfds))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        actual_nsds = catalog.nsd
+        assert len(actual_nsds) == 1, "There should only be a single nsd"
+
+    @pytest.mark.feature("upload-image")
+    def test_upload_images(self, descriptor_images, cloud_host, cloud_user, cloud_tenants):
+        """Upload each descriptor image as a qcow2 glance image into every
+        configured OpenStack tenant."""
+
+        openstack = rift.auto.mano.OpenstackManoSetup(
+                cloud_host,
+                cloud_user,
+                [(tenant, "private") for tenant in cloud_tenants])
+
+        for image_location in descriptor_images:
+            image = RwcalYang.ImageInfoItem.from_dict({
+                    'name': os.path.basename(image_location),
+                    'location': image_location,
+                    'disk_format': 'qcow2',
+                    'container_format': 'bare'})
+            openstack.create_image(image)
+
+
+    def test_set_scaling_params(self, nsd_proxy):
+        """Cap every scaling group in the onboarded NSD at 2 instances."""
+        nsds = nsd_proxy.get('/nsd-catalog')
+        nsd = nsds.nsd[0]
+        for scaling_group in nsd.scaling_group_descriptor:
+            scaling_group.max_instance_count = 2
+
+        nsd_proxy.replace_config('/nsd-catalog/nsd[id="{}"]'.format(
+            nsd.id), nsd)
+
+
+    def test_instantiate_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account_name):
+        """Instantiate an NSR from the first catalog NSD with one input
+        parameter, then verify the parameter appears in the running config.
+        NOTE(review): base_proxy is unused in this test."""
+
+        def verify_input_parameters(running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            for run_input_param in running_config.input_parameter:
+                if (run_input_param.xpath == config_param.xpath and
+                    run_input_param.value == config_param.value):
+                    return True
+
+            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
+                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                           config_param.value,
+                                                                           running_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        # Input parameter overriding the NSD description.
+        input_parameters = []
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+        descr_value = "New NSD Description"
+        # NOTE(review): in_param_id is generated but never used.
+        in_param_id = str(uuid.uuid4())
+
+        input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd, input_parameters, cloud_account_name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+        assert nsr_opdata is not None
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+    def test_wait_for_nsr_started(self, rwnsr_proxy):
+        """Block until every NSR reports operational-status 'running'."""
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
+            rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=240)
+
+
+@pytest.mark.teardown('nsr')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestNsrTeardown(object):
+    """Teardown phase: terminate the NSR, then delete NSD/VNFD records."""
+
+    def test_terminate_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger, cloud_type):
+        """
+        Terminate the instance and check if the record is deleted.
+
+        Asserts:
+        1. NSR record is deleted from instance-config.
+
+        """
+        logger.debug("Terminating NSR")
+
+        # Mock clouds tear down quickly; skip the long post-kill wait.
+        wait_after_kill = True
+        if cloud_type == "mock":
+            wait_after_kill = False
+
+        terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger, wait_after_kill=wait_after_kill)
+
+    def test_delete_records(self, nsd_proxy, vnfd_proxy):
+        """Delete the NSD & VNFD records
+
+        Asserts:
+            The records are deleted.
+        """
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            nsd_proxy.delete_config(xpath)
+
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/test_failover.py b/rwlaunchpad/ra/pytest/test_failover.py
new file mode 100755
index 0000000..40dd7d0
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/test_failover.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python3
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_failover.py
+@brief System test of stopping launchpad on master and
+validating configuration on standby
+"""
+import os
+import sys
+import time
+import argparse
+import subprocess
+
+import gi
+from gi.repository import RwVnfdYang
+from gi.repository import RwVnfrYang
+
+import rift.auto.proxy
+from rift.auto.session import NetconfSession
+
+def yield_vnfd_vnfr_pairs(proxy, nsr=None):
+    """
+    Yields tuples of vnfd & vnfr entries.
+
+    Args:
+        proxy (callable): Launchpad proxy
+        nsr (optional): If specified, only the vnfr & vnfd records of the NSR
+                are returned
+
+    Yields:
+        Tuple: VNFD and its corresponding VNFR entry
+    """
+    def get_vnfd(vnfd_id):
+        xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_id)
+        return proxy(RwVnfdYang).get(xpath)
+
+    vnfr = "/vnfr-catalog/vnfr"
+    # NOTE(review): START/STOP look like leftover debug prints.
+    print ("START")
+    vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)
+    print ("STOP")
+    for vnfr in vnfrs.vnfr:
+
+        # When an NSR is given, skip VNFRs that are not among its
+        # constituent VNFR references.
+        if nsr:
+            const_vnfr_ids = [const_vnfr.vnfr_id for const_vnfr in nsr.constituent_vnfr_ref]
+            if vnfr.id not in const_vnfr_ids:
+                continue
+
+        vnfd = get_vnfd(vnfr.vnfd_ref)
+        yield vnfd, vnfr
+
+def check_configuration_on_standby(standby_ip):
+    """Connect to the standby (new master) over NETCONF and verify the VNFR
+    configuration survived failover.
+
+    # NOTE(review): the hardcoded 2 presumably corresponds to the ping and
+    # pong VNFs — confirm against the deployed NS.
+    """
+    print ("Start- check_configuration_on_standby")
+    mgmt_session = NetconfSession(standby_ip)
+    mgmt_session.connect()
+    print ("Connected to proxy")
+
+    vnf_tuple = list(yield_vnfd_vnfr_pairs(mgmt_session.proxy))
+    assert len(vnf_tuple) == 2
+
+if __name__ == "__main__":
+    # NOTE(review): --master-ip is parsed but never used in this script.
+    parser = argparse.ArgumentParser(description='Test launchpad failover') 
+    parser.add_argument("--master-ip", action="store", dest="master_ip")
+    parser.add_argument("--standby-ip", action="store", dest="standby_ip")
+
+    args = parser.parse_args()
+
+    # 60 seconds should be more than enough time for Agent to be able
+    # to make confd as the new Master
+    time.sleep(60)
+    print ("Try fetching configuration from the old standby or the new Master\n")
+    check_configuration_on_standby(args.standby_ip)
diff --git a/rwlaunchpad/ra/pytest/test_launchpad.py b/rwlaunchpad/ra/pytest/test_launchpad.py
new file mode 100644
index 0000000..81f5b54
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/test_launchpad.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_launchpad.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 07/07/2016
+@brief System test of basic launchpad functionality
+"""
+
+import pytest
+
+import gi
+gi.require_version('RwsdnYang', '1.0')
+
+from gi.repository import RwsdnYang
+
+@pytest.mark.setup('sdn')
+@pytest.mark.feature('sdn')
+@pytest.mark.incremental
+class TestSdnSetup:
+    def test_create_odl_sdn_account(self, mgmt_session, sdn_account_name, sdn_account_type):
+        '''Configure sdn account
+
+        Asserts:
+            SDN name and account type.
+        '''
+        proxy = mgmt_session.proxy(RwsdnYang)
+        sdn_account = RwsdnYang.SDNAccount(
+                name=sdn_account_name,
+                account_type=sdn_account_type)
+        xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
+        proxy.create_config(xpath, sdn_account)
+        # Read back to confirm the account was created.
+        sdn_account = proxy.get(xpath)
+
+@pytest.mark.depends('sdn')
+@pytest.mark.feature('sdn')
+@pytest.mark.incremental
+class TestSdn:
+    def test_show_odl_sdn_account(self, mgmt_session, sdn_account_name, sdn_account_type):
+        '''Showing sdn account configuration
+
+        Asserts:
+            sdn_account.account_type is what was configured
+        '''
+        proxy = mgmt_session.proxy(RwsdnYang)
+        xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
+        sdn_account = proxy.get_config(xpath)
+        assert sdn_account.account_type == sdn_account_type
+
+@pytest.mark.teardown('sdn')
+@pytest.mark.feature('sdn')
+@pytest.mark.incremental
+class TestSdnTeardown:
+    def test_delete_odl_sdn_account(self, mgmt_session, sdn_account_name):
+        '''Unconfigure sdn account'''
+        proxy = mgmt_session.proxy(RwsdnYang)
+        xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
+        proxy.delete_config(xpath)
+
+
+@pytest.mark.setup('launchpad')
+@pytest.mark.usefixtures('cloud_account')
+@pytest.mark.incremental
+class TestLaunchpadSetup:
+    def test_create_cloud_accounts(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+        '''Configure cloud accounts
+
+        Asserts:
+            Cloud name and cloud type details
+        '''
+        proxy = mgmt_session.proxy(cloud_module)
+        for cloud_account in cloud_accounts:
+            # replace_config is idempotent: create-or-overwrite the account.
+            xpath = '{}[name="{}"]'.format(cloud_xpath, cloud_account.name)
+            proxy.replace_config(xpath, cloud_account)
+            response =  proxy.get(xpath)
+            assert response.name == cloud_account.name
+            assert response.account_type == cloud_account.account_type
+
+@pytest.mark.depends('launchpad')
+@pytest.mark.usefixtures('cloud_account')
+@pytest.mark.incremental
+class TestLaunchpad:
+    def test_account_connection_status(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+        '''Verify connection status on each cloud account
+
+        Asserts:
+            Cloud account is successfully connected
+        '''
+        proxy = mgmt_session.proxy(cloud_module)
+        for cloud_account in cloud_accounts:
+            # Poll until the account reports 'success'; abort early on 'failure'.
+            proxy.wait_for(
+                '{}[name="{}"]/connection-status/status'.format(cloud_xpath, cloud_account.name),
+                'success',
+                timeout=30,
+                fail_on=['failure'])
+
+
+@pytest.mark.teardown('launchpad')
+@pytest.mark.usefixtures('cloud_account')
+@pytest.mark.incremental
+class TestLaunchpadTeardown:
+    def test_delete_cloud_accounts(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+        '''Unconfigure cloud_account'''
+        proxy = mgmt_session.proxy(cloud_module)
+        for cloud_account in cloud_accounts:
+            xpath = "{}[name='{}']".format(cloud_xpath, cloud_account.name)
+            proxy.delete_config(xpath)
diff --git a/rwlaunchpad/ra/pytest/test_launchpad_longevity.py b/rwlaunchpad/ra/pytest/test_launchpad_longevity.py
new file mode 100644
index 0000000..c8a4662
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/test_launchpad_longevity.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/01/04
+#
+
+import rift.vcs.vcs
+import time
+import gi
+
+def test_launchpad_longevity(mgmt_session, mgmt_domain_name):
+    """Let the system run for a minute, then assert it is still started.
+    NOTE(review): mgmt_domain_name is unused here."""
+    time.sleep(60)
+    rift.vcs.vcs.wait_until_system_started(mgmt_session)
+
diff --git a/rwlaunchpad/ra/pytest/test_start_standby.py b/rwlaunchpad/ra/pytest/test_start_standby.py
new file mode 100755
index 0000000..cf0e5d9
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/test_start_standby.py
@@ -0,0 +1,78 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file  test_start_standby.py
+@brief This test starts the launchpad on a remote VM
+"""
+import argparse
+import sys
+import time
+import os
+import glob
+import subprocess
+import shlex
+import multiprocessing
+
+import rift.auto.session
+import rift.vcs.vcs
+
+def get_manifest_file():
+    artifacts_path = os.environ["RIFT_ARTIFACTS"]
+    manifest_files = glob.glob(artifacts_path + "/manifest*xml")
+    manifest_files.sort(key=lambda x: os.stat(x).st_mtime)
+    return manifest_files[-1]
+
+def copy_manifest_to_remote(remote_ip, manifest_file):
+    print ("Copying manifest file {} to remote".format(manifest_file))
+    cmd = "scp {0} {1}:/tmp/manifest.xml".format(manifest_file, remote_ip)
+    print ("Running command: {}".format(cmd))
+    subprocess.check_call(cmd, shell=True)
+
+
+def test_start_lp_remote(remote_ip):
+    rift_root = os.environ.get('HOME_RIFT', os.environ.get('RIFT_ROOT'))
+    rift_install = os.environ.get('RIFT_INSTALL')
+
+    copy_manifest_to_remote(remote_ip, get_manifest_file())
+
+    cmd_template = ("ssh_root {remote_ip} -q -o BatchMode=yes -o "
+    " UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -- "
+    " \"rm -rf /tmp/corosync; cd {rift_install}; {rift_root}/rift-shell -e -- {rift_install}/usr/bin/rwmain -m /tmp/manifest.xml\"").format(
+      remote_ip=remote_ip,
+      rift_root=rift_root,
+      rift_install=rift_install)
+
+    def start_lp(cmd):
+        print ("Running cmd: {}".format(cmd))
+        subprocess.call(shlex.split(cmd))
+
+    print ("Starting launchpad on remote VM: {}".format(cmd_template))
+    p = multiprocessing.Process(target=start_lp, args=(cmd_template,))
+    p.daemon = True
+    p.start()
+    print ("Standby system started")
+    time.sleep(60)
+    pass
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Start standby LP')
+    parser.add_argument("--remote-ip", action="store", dest="remote_ip")
+
+    args = parser.parse_args()
+
+    test_start_lp_remote(args.remote_ip)
diff --git a/rwlaunchpad/ra/racfg/multi_tenant_systest_openstack.racfg b/rwlaunchpad/ra/racfg/multi_tenant_systest_openstack.racfg
new file mode 100644
index 0000000..c9adde4
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/multi_tenant_systest_openstack.racfg
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_MULTI_TENANT_OPENSTACK",
+  "commandline":"./launchpad_systest --test-name 'TC_MULTI_TENANT_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --user={user} {tenants} --sysinfo",
+  "test_description":"System test for multiple tenants(Openstack)",
+  "required_tenants":2,
+  "run_as_root": false,
+  "status":"working",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 1800,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/multivm_vnf_slb_systest.racfg b/rwlaunchpad/ra/racfg/multivm_vnf_slb_systest.racfg
new file mode 100644
index 0000000..2294b91
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/multivm_vnf_slb_systest.racfg
@@ -0,0 +1,17 @@
+{
+  "test_name":"TC_MULTI_VM_VNF_SLB",
+  "commandline":"./multi_vm_vnf_slb_systest.sh --test-name 'TC_MULTI_VM_VNF_SLB' --cloud-type openstack --cloud-host={cloud_host}  --user={user} {tenants}",
+  "test_description":"System test for scriptable load balancer with Multi-VMs VNFs",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
diff --git a/rwlaunchpad/ra/racfg/multivm_vnf_trafgen_systest.racfg b/rwlaunchpad/ra/racfg/multivm_vnf_trafgen_systest.racfg
new file mode 100755
index 0000000..3879146
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/multivm_vnf_trafgen_systest.racfg
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_MULTI_VM_VNF_TRAFGEN",
+  "commandline":"./multi_vm_vnf_trafgen_systest.sh --test-name 'TC_MULTI_VM_VNF_TRAFGEN' --cloud-type openstack --cloud-host={cloud_host}  --user={user}  {tenants}",
+  "test_description":"System test for trafgen application with Multi-VMs VNFs",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_lp_ha_systest_openstack.racfg b/rwlaunchpad/ra/racfg/pingpong_lp_ha_systest_openstack.racfg
new file mode 100644
index 0000000..2887649
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/pingpong_lp_ha_systest_openstack.racfg
@@ -0,0 +1,23 @@
+{
+  "test_name":"TC_PINGPONG_LP_HA_OPENSTACK",
+  "commandline":"./pingpong_lp_ha_systest --test-name 'TC_PINGPONG_LP_HA_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --user={user} {tenants} --ha-mode LS --expanded",
+  "test_description":"System test for standalone Launchpad (Openstack) with High availability",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2600,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    },
+    {
+      "name": "rift_auto_launchpad_standby",
+      "memory":4096,
+      "cpus":2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_records_systest_cloudsim.racfg b/rwlaunchpad/ra/racfg/pingpong_records_systest_cloudsim.racfg
new file mode 100644
index 0000000..25e969f
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/pingpong_records_systest_cloudsim.racfg
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_PINGPONG_RECORDS_CLOUDSIM",
+  "commandline":"./pingpong_records_systest --test-name 'TC_PINGPONG_RECORDS_CLOUDSIM' --sysinfo --netconf --restconf",
+  "test_description":"System test for ping and pong vnf (Cloudsim)",
+  "run_as_root": true,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2600,
+  "networks":[],
+  "target_vm":"rift_auto_launchpad",
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 16384,
+      "cpus": 4
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg b/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg
new file mode 100644
index 0000000..62940eb
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_RECORDS_OPENSTACK",
+  "commandline":"./pingpong_records_systest --test-name 'TC_PINGPONG_RECORDS_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo  --user={user} {tenants} --netconf --restconf",
+  "test_description":"System test for ping and pong vnf (Openstack)",
+  "run_as_root": true,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2600,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack_xml.racfg b/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack_xml.racfg
new file mode 100644
index 0000000..76b7c66
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack_xml.racfg
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_RECORDS_OPENSTACK_XML",
+  "commandline":"./pingpong_records_systest  --test-name 'TC_PINGPONG_RECORDS_OPENSTACK_XML' --cloud-type 'openstack' --sysinfo --use-xml-mode --cloud-host={cloud_host} --user={user} {tenants} --restconf",
+  "test_description":"System test for ping and pong vnf (Openstack)",
+  "run_as_root": true,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2600,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_scaling_systest_openstack.racfg b/rwlaunchpad/ra/racfg/pingpong_scaling_systest_openstack.racfg
new file mode 100644
index 0000000..7d6b30e
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/pingpong_scaling_systest_openstack.racfg
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_SCALING_OPENSTACK",
+  "commandline":"./pingpong_scaling_systest --cloud-type 'openstack' --cloud-host={cloud_host}  --user={user} {tenants}",
+  "test_description":"Scaling system test for ping and pong vnf (Openstack)",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg b/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg
new file mode 100644
index 0000000..2f4388d
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_VNF_RELOAD_OPENSTACK",
+  "commandline":"./pingpong_vnf_reload_systest  --test-name 'TC_PINGPONG_VNF_RELOAD_OPENSTACK' --cloud-type 'openstack' --sysinfo --cloud-host={cloud_host} --user={user} {tenants} --restconf",
+  "test_description":"System test for ping pong vnf reload(Openstack)",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack_xml.racfg b/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack_xml.racfg
new file mode 100644
index 0000000..ce44c75
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack_xml.racfg
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_VNF_RELOAD_OPENSTACK_XML",
+  "commandline":"./pingpong_vnf_reload_systest  --test-name 'TC_PINGPONG_VNF_RELOAD_OPENSTACK_XML' --cloud-type 'openstack' --sysinfo --use-xml-mode --cloud-host={cloud_host} --user={user} {tenants} --restconf",
+  "test_description":"System test for ping pong vnf reload(Openstack)",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg b/rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg
new file mode 100644
index 0000000..c2f8f0c
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_PINGPONG_VNF_CLOUDSIM",
+  "commandline":"./pingpong_vnf_systest --test-name 'TC_PINGPONG_VNF_CLOUDSIM'",
+  "target_vm":"VM",
+  "test_description":"System test for ping and pong vnf",
+  "run_as_root": true,
+  "status":"broken",
+  "keywords":["nightly","smoke","smoke_stable","MANO","cloudsim"],
+  "timelimit": 1800,
+  "networks":[],
+  "vms":[
+    {
+      "name": "VM",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg b/rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg
new file mode 100644
index 0000000..91cd1ad
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_VNF_OPENSTACK",
+  "commandline":"./pingpong_vnf_systest --test-name 'TC_PINGPONG_VNF_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host}  --user={user} {tenants} --sysinfo",
+  "test_description":"System test for ping and pong vnf (Openstack)",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/recovery_systest.racfg b/rwlaunchpad/ra/racfg/recovery_systest.racfg
new file mode 100644
index 0000000..6d0db13
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/recovery_systest.racfg
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_TASKLET_RECOVERY_OPENSTACK",
+  "commandline":"./pingpong_recovery_systest --test-name 'TC_TASKLET_RECOVERY_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo  --user={user} {tenants} --netconf",
+  "test_description":"System test for testing the DTS recovery feature of tasklets (Openstack)",
+  "run_as_root": false,
+  "status":"working",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 8192,
+      "cpus": 4
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/scaling_systest.racfg b/rwlaunchpad/ra/racfg/scaling_systest.racfg
new file mode 100644
index 0000000..2d8744d
--- /dev/null
+++ b/rwlaunchpad/ra/racfg/scaling_systest.racfg
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_SCALING_OPENSTACK",
+  "commandline":"./scaling_systest --test-name 'TC_SCALING_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --tenant={tenant}  --sysinfo",
+  "test_description":"System test for scaling HAProxy vnf (Openstack)",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/scaling_systest b/rwlaunchpad/ra/scaling_systest
new file mode 100755
index 0000000..bb37bf2
--- /dev/null
+++ b/rwlaunchpad/ra/scaling_systest
@@ -0,0 +1,41 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 2016/04/12
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+
+SCRIPT_TEST="py.test -v \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/test_onboard.py \
+            ${PYTEST_DIR}/system/ns/haproxy/test_scaling.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/test/CMakeLists.txt b/rwlaunchpad/test/CMakeLists.txt
new file mode 100644
index 0000000..bd1a51e
--- /dev/null
+++ b/rwlaunchpad/test/CMakeLists.txt
@@ -0,0 +1,65 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Creation Date: 10/01/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+install(
+  PROGRAMS
+    launchpad.py
+    DESTINATION demos
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+install(
+  FILES
+    pytest/lp_test.py
+  DESTINATION
+    usr/rift/systemtest/pytest/launchpad
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+install(
+  PROGRAMS
+    launchpad_recovery
+  DESTINATION
+    usr/rift/systemtest/launchpad
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+install(
+  PROGRAMS
+    launchpad
+  DESTINATION usr/bin
+  COMPONENT rwcal-1.0
+  )
+
+rift_py3test(utest_rwmonitor
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_rwmonitor.py
+  )
+
+rift_py3test(utest_rwnsm
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_rwnsm.py
+  )
+
+rift_py3test(tosca_ut
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/tosca_ut.py
+  )
diff --git a/rwlaunchpad/test/launchpad b/rwlaunchpad/test/launchpad
new file mode 100644
index 0000000..6e423ac
--- /dev/null
+++ b/rwlaunchpad/test/launchpad
@@ -0,0 +1,145 @@
+#!/usr/bin/env python3
+
+import argparse
+import contextlib
+import os
+import signal
+import subprocess
+import sys
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwLog', '1.0')
+
+
+TEST_PARSER = "test"
+
+
+class PyTestRunner:
+    SYS_CMD = "demos/launchpad.py -m ethsim --skip-prepare-vm -c"
+    CLOUDSIM_CMD = "cloudsim start"
+
+    @property
+    def rift_install(self):
+        return os.getenv('RIFT_INSTALL')
+
+    @property
+    def account_script(self):
+        return os.path.join(
+                self.rift_install,
+                "usr/rift/systemtest/pytest/mission_control/test_mission_control.py")
+
+    @property
+    def onboard_script(self):
+        return os.path.join(
+                self.rift_install,
+                "usr/rift/systemtest/pytest/mission_control/pingpong_vnf/test_onboard_vnf.py")
+
+    @property
+    def records_script(self):
+        return os.path.join(
+                self.rift_install,
+                "usr/rift/systemtest/pytest/mission_control/pingpong_vnf/test_records.py")
+
+    def run_cmd(self, scripts=None, cal_account="mock"):
+        scripts = scripts or [self.account_script, self.onboard_script]
+
+        cmd = "py.test -v "
+
+        # In mock-cal mode we don't need the images.
+        if cal_account == "mock":
+            cmd += "--{} --lp-standalone --network-service pingpong_noimg ".format(cal_account)
+        else:
+            cmd += "--{} --lp-standalone --network-service pingpong ".format(cal_account)
+
+        cmd += " ".join(scripts)
+        subprocess.call(cmd, shell=True)
+
+    @contextlib.contextmanager
+    def system_start(self, debug_mode=False, cal_account="mock"):
+
+
+        os.environ['LD_PRELOAD'] = os.path.join(
+                self.rift_install,
+                "usr/lib/rift/preloads/librwxercespreload.so")
+
+        sys_cmd = os.path.join(self.rift_install, self.SYS_CMD)
+        if debug_mode:
+            sys_cmd += " --mock-cli"
+
+        process = subprocess.Popen(
+            sys_cmd,
+            shell=True,
+            preexec_fn=os.setsid)
+
+        cloudsim_process = None
+        if cal_account == "lxc":
+            # If in LXC start the cloudsim server.
+            cloudsim_process = subprocess.Popen(
+                PyTestRunner.CLOUDSIM_CMD,
+                shell=True,
+                preexec_fn=os.setsid)
+
+        def kill():
+            os.killpg(process.pid, signal.SIGTERM)
+            if cloudsim_process:
+                os.killpg(cloudsim_process.pid, signal.SIGTERM)
+                cloudsim_process.wait()
+
+            process.wait()
+
+        signal.signal(signal.SIGHUP, kill)
+        signal.signal(signal.SIGTERM, kill)
+
+        yield
+
+        kill()
+
+
+def test_launchpad(args):
+    pytest = PyTestRunner()
+
+    scripts = None
+    if args.cal == "lxc":
+        scripts = [pytest.account_script, pytest.onboard_script, pytest.records_script]
+
+    with pytest.system_start(cal_account=args.cal):
+        pytest.run_cmd(scripts=scripts, cal_account=args.cal)
+
+
+def parse(arguments):
+    parser = argparse.ArgumentParser(description=__doc__,
+                                     formatter_class=argparse.RawDescriptionHelpFormatter)
+    parser.add_argument(
+            '--log-level', '-l',
+            default="WARNING",
+            type=str,
+            choices=["INFO", "DEBUG", "WARNING", "ERROR"],
+            help="Set log level, defaults to warning and above.")
+
+    subparsers = parser.add_subparsers()
+
+    start_parser = subparsers.add_parser(TEST_PARSER, help="Test the LP")
+    start_parser.add_argument(
+            '--cal', "-c",
+            help="Run the server in the foreground. The logs are sent to console.",
+            default="mock",
+            choices=["lxc", "mock"])
+    start_parser.set_defaults(which=TEST_PARSER)
+
+    args = parser.parse_args(arguments)
+
+    return args
+
+
+def main(args):
+
+    args = parse(args)
+
+    if args.which == TEST_PARSER:
+        test_launchpad(args)
+
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
\ No newline at end of file
diff --git a/rwlaunchpad/test/launchpad.py b/rwlaunchpad/test/launchpad.py
new file mode 100755
index 0000000..239f91b
--- /dev/null
+++ b/rwlaunchpad/test/launchpad.py
@@ -0,0 +1,520 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import logging
+import os
+import resource
+import socket
+import sys
+import subprocess
+import shlex
+import shutil
+import netifaces
+
+from rift.rwlib.util import certs
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.net
+import rift.vcs
+import rift.vcs.core as core
+import rift.vcs.demo
+import rift.vcs.vms
+
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.net
+
+from rift.vcs.ext import ClassProperty
+
+logger = logging.getLogger(__name__)
+
+
+class NsmTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a network services manager tasklet.
+    """
+
+    def __init__(self, name='network-services-manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a NsmTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(NsmTasklet, self).__init__(name=name, uid=uid,
+                                         config_ready=config_ready,
+                                         recovery_action=recovery_action,
+                                         data_storetype=data_storetype,
+                                        )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwnsmtasklet')
+    plugin_name = ClassProperty('rwnsmtasklet')
+
+
+class VnsTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a network services manager tasklet.
+    """
+
+    def __init__(self, name='virtual-network-service', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a VnsTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(VnsTasklet, self).__init__(name=name, uid=uid,
+                                         config_ready=config_ready,
+                                         recovery_action=recovery_action,
+                                         data_storetype=data_storetype,
+                                        )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnstasklet')
+    plugin_name = ClassProperty('rwvnstasklet')
+
+
+class VnfmTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a virtual network function manager tasklet.
+    """
+
+    def __init__(self, name='virtual-network-function-manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a VnfmTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(VnfmTasklet, self).__init__(name=name, uid=uid,
+                                          config_ready=config_ready,
+                                          recovery_action=recovery_action,
+                                          data_storetype=data_storetype,
+                                         )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnfmtasklet')
+    plugin_name = ClassProperty('rwvnfmtasklet')
+
+
+class ResMgrTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a Resource Manager tasklet.
+    """
+
+    def __init__(self, name='Resource-Manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a ResMgrTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(ResMgrTasklet, self).__init__(name=name, uid=uid,
+                                            config_ready=config_ready,
+                                            recovery_action=recovery_action,
+                                            data_storetype=data_storetype,
+                                           )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwresmgrtasklet')
+    plugin_name = ClassProperty('rwresmgrtasklet')
+
+
+class ImageMgrTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a Image Manager tasklet.
+    """
+
+    def __init__(self, name='Image-Manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a Image Manager Tasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(ImageMgrTasklet, self).__init__(
+                name=name, uid=uid,
+                config_ready=config_ready,
+                recovery_action=recovery_action,
+                data_storetype=data_storetype,
+                )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwimagemgrtasklet')
+    plugin_name = ClassProperty('rwimagemgrtasklet')
+
+
+class MonitorTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a tasklet that is used to monitor NFVI metrics.
+    """
+
+    def __init__(self, name='nfvi-metrics-monitor', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a MonitorTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+
+        """
+        super(MonitorTasklet, self).__init__(name=name, uid=uid,
+                                             config_ready=config_ready,
+                                             recovery_action=recovery_action,
+                                             data_storetype=data_storetype,
+                                            )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwmonitor')
+    plugin_name = ClassProperty('rwmonitor')
+
+class RedisServer(rift.vcs.NativeProcess):
+    def __init__(self, name="RW.Redis.Server",
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        super(RedisServer, self).__init__(
+                name=name,
+                exe="/usr/bin/redis-server",
+                config_ready=config_ready,
+                recovery_action=recovery_action,
+                data_storetype=data_storetype,
+                )
+
+    @property
+    def args(self):
+        return "./usr/bin/active_redis.conf --port 9999"
+
+
+class MonitoringParameterTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a tasklet that is used to generate monitoring
+    parameters.
+    """
+
+    def __init__(self, name='Monitoring-Parameter', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a MonitoringParameterTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+
+        """
+        super(MonitoringParameterTasklet, self).__init__(name=name, uid=uid,
+                                             config_ready=config_ready,
+                                             recovery_action=recovery_action,
+                                             data_storetype=data_storetype,
+                                            )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwmonparam')
+    plugin_name = ClassProperty('rwmonparam')
+
+
+class AutoscalerTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a tasklet that is used to generate monitoring
+    parameters.
+    """
+
+    def __init__(self, name='Autoscaler', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a MonitoringParameterTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+
+        """
+        super(AutoscalerTasklet, self).__init__(name=name, uid=uid,
+                                             config_ready=config_ready,
+                                             recovery_action=recovery_action,
+                                             data_storetype=data_storetype,
+                                            )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwautoscaler')
+    plugin_name = ClassProperty('rwautoscaler')
+
+
def get_ui_ssl_args():
    """Return the SSL argument string for launchpad UI processes.

    Returns an empty string when SSL is disabled or the bootstrap
    certificates cannot be found.
    """
    try:
        use_ssl, certfile_path, keyfile_path = certs.get_bootstrap_cert_and_key()
    except certs.BootstrapSslMissingException:
        logger.error('No bootstrap certificates found.  Disabling UI SSL')
        return ""

    # SSL enabled and certificates available: build the argument string.
    if use_ssl:
        return "--enable-https --keyfile-path=%s --certfile-path=%s" % (keyfile_path, certfile_path)

    return ""
+
+
class UIServer(rift.vcs.NativeProcess):
    """Native process that runs the launchpad UI (skyquake) server."""

    def __init__(self, name="RW.MC.UI",
                 config_ready=True,
                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
                 data_storetype=core.DataStore.NOSTORE.value,
                 ):
        """Create the UI server process description."""
        super(UIServer, self).__init__(name=name,
                                       exe="./usr/share/rw.ui/skyquake/scripts/launch_ui.sh",
                                       config_ready=config_ready,
                                       recovery_action=recovery_action,
                                       data_storetype=data_storetype)

    @property
    def args(self):
        # Command-line arguments: the SSL flags from get_ui_ssl_args(),
        # computed on access rather than at construction time.
        return get_ui_ssl_args()
+
class ConfigManagerTasklet(rift.vcs.core.Tasklet):
    """
    This class represents a Configuration Manager tasklet
    (rwconmantasklet plugin).
    """

    def __init__(self, name='Configuration-Manager', uid=None,
                 config_ready=True,
                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
                 data_storetype=core.DataStore.NOSTORE.value,
                 ):
        """
        Creates a ConfigManagerTasklet object.

        Arguments:
            name  - the name of the tasklet
            uid   - a unique identifier
        """
        super(ConfigManagerTasklet, self).__init__(name=name, uid=uid,
                                                   config_ready=config_ready,
                                                   recovery_action=recovery_action,
                                                   data_storetype=data_storetype,
                                                  )

    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwconmantasklet')
    plugin_name = ClassProperty('rwconmantasklet')
+
class GlanceServer(rift.vcs.NativeProcess):
    """Native process that runs the glance image catalog service."""

    def __init__(self, name="glance-image-catalog",
                 config_ready=True,
                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
                 data_storetype=core.DataStore.NOSTORE.value,
                 ):
        """Create the glance image catalog process description."""
        super(GlanceServer, self).__init__(name=name,
                                           exe="./usr/bin/glance_start_wrapper",
                                           config_ready=config_ready,
                                           recovery_action=recovery_action,
                                           data_storetype=data_storetype)

    @property
    def args(self):
        # The wrapper takes the glance configuration directory as its
        # only argument (relative to the install directory).
        return "./etc/glance"
+
+
class Demo(rift.vcs.demo.Demo):
    """Launchpad demo system: one colony with the lead launchpad VM and,
    in HA mode, a standby VM."""

    def __init__(self, no_ui=False, ha_mode=None, mgmt_ip_list=None, test_name=None):
        """
        Creates the launchpad Demo object.

        Arguments:
            no_ui        - when True, the UI server process is not started
            ha_mode      - when truthy, configure an active/standby pair
                           (requires at least two entries in mgmt_ip_list)
            mgmt_ip_list - management IP addresses for the launchpad VMs;
                           defaults to ["127.0.0.1"]
            test_name    - optional test name passed through to SystemInfo
        """
        # Fix: the previous default value was a shared mutable list ([])
        # that was mutated below; default to None and normalize here.
        if not mgmt_ip_list:
            mgmt_ip_list = ["127.0.0.1"]

        procs = [
            ConfigManagerTasklet(),
            GlanceServer(),
            rift.vcs.DtsRouterTasklet(),
            rift.vcs.MsgBrokerTasklet(),
            rift.vcs.RestPortForwardTasklet(),
            rift.vcs.RestconfTasklet(),
            rift.vcs.RiftCli(),
            rift.vcs.uAgentTasklet(),
            rift.vcs.Launchpad(),
            ]

        # Processes run on the standby VM when HA mode is enabled.
        standby_procs = [
            RedisServer(),
            rift.vcs.DtsRouterTasklet(),
            rift.vcs.MsgBrokerTasklet(),
            ]

        # Use redis-backed persistence in HA mode, BDB otherwise.
        datastore = core.DataStore.BDB.value
        if ha_mode:
            procs.append(RedisServer())
            datastore = core.DataStore.REDIS.value

        if not no_ui:
            procs.append(UIServer())

        # Tasklets that are restarted individually on failure rather than
        # failing the whole system.
        restart_procs = [
              VnfmTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
              VnsTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
              MonitorTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
              MonitoringParameterTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
              NsmTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
              ResMgrTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
              ImageMgrTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
              AutoscalerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
            ]

        colony = rift.vcs.core.Colony(name='top', uid=1)

        lead_lp_vm = rift.vcs.VirtualMachine(
              name='vm-launchpad-1',
              ip=mgmt_ip_list[0],
              procs=procs,
              restart_procs=restart_procs,
            )
        lead_lp_vm.leader = True
        colony.append(lead_lp_vm)

        if ha_mode:
            stby_lp_vm = rift.vcs.VirtualMachine(
                  name='launchpad-vm-2',
                  ip=mgmt_ip_list[1],
                  procs=standby_procs,
                  start=False,
                )
            # WA to Agent mode_active flag reset
            stby_lp_vm.add_tasklet(rift.vcs.uAgentTasklet(), mode_active=False)
            colony.append(stby_lp_vm)

        sysinfo = rift.vcs.SystemInfo(
                    mode='ethsim',
                    zookeeper=rift.vcs.manifest.RaZookeeper(master_ip=mgmt_ip_list[0]),
                    colonies=[colony],
                    multi_broker=True,
                    multi_dtsrouter=True,
                    mgmt_ip_list=mgmt_ip_list,
                    test_name=test_name,
                  )

        super(Demo, self).__init__(
            # Construct the system. This system consists of 1 cluster in 1
            # colony. The master cluster houses CLI and management VMs
            sysinfo = sysinfo,

            # Define the generic portmap.
            port_map = {},

            # Define a mapping from the placeholder logical names to the real
            # port names for each of the different modes supported by this demo.
            port_names = {
                'ethsim': {
                },
                'pci': {
                }
            },

            # Define the connectivity between logical port names.
            port_groups = {},
        )
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s')
+
+    # Create a parser which includes all generic demo arguments
+    parser = rift.vcs.demo.DemoArgParser()
+    parser.add_argument("--no-ui", action='store_true')
+    args = parser.parse_args(argv)
+
+    # Disable loading any kernel modules for the launchpad VM
+    # since it doesn't need it and it will fail within containers
+    os.environ["NO_KERNEL_MODS"] = "1"
+
+    # Remove the persistent Redis data
+    for f in os.listdir(os.environ["INSTALLDIR"]):
+        if f.endswith(".aof") or f.endswith(".rdb"):
+           os.remove(os.path.join(os.environ["INSTALLDIR"], f))
+    
+    # Remove the persistant DTS recovery files 
+    for f in os.listdir(os.environ["INSTALLDIR"]):
+        if f.endswith(".db"):
+            os.remove(os.path.join(os.environ["INSTALLDIR"], f))
+    try:
+        shutil.rmtree(os.path.join(os.environ["INSTALLDIR"], "zk/server-1"))
+        shutil.rmtree(os.path.join(os.environ["INSTALLDIR"], "var/rift/tmp*"))
+    except:
+        pass
+
+    ha_mode = args.ha_mode
+    mgmt_ip_list = [] if not args.mgmt_ip_list else args.mgmt_ip_list
+
+    #load demo info and create Demo object
+    demo = Demo(args.no_ui, ha_mode, mgmt_ip_list, args.test_name)
+
+    # Create the prepared system from the demo
+    system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args,
+              northbound_listing="cli_launchpad_schema_listing.txt",
+              netconf_trace_override=True)
+
+    confd_ip = socket.gethostbyname(socket.gethostname())
+    intf = netifaces.ifaddresses('eth0')
+    if intf and netifaces.AF_INET in intf and len(intf[netifaces.AF_INET]):
+       confd_ip = intf[netifaces.AF_INET][0]['addr']
+    rift.vcs.logger.configure_sink(config_file=None, confd_ip=confd_ip)
+
+    # Start the prepared system
+    system.start()
+
+
if __name__ == "__main__":
    # Allow unlimited-size core dumps for post-mortem debugging.
    resource.setrlimit(
        resource.RLIMIT_CORE,
        (resource.RLIM_INFINITY, resource.RLIM_INFINITY),
    )
    try:
        main()
    except rift.vcs.demo.ReservationError:
        print("ERROR: unable to retrieve a list of IP addresses from the reservation system")
        sys.exit(1)
    except rift.vcs.demo.MissingModeError:
        print("ERROR: you need to provide a mode to run the script")
        sys.exit(1)
    finally:
        # The launched system can leave the terminal in a bad state;
        # restore sane settings on the way out.
        os.system("stty sane")
diff --git a/rwlaunchpad/test/launchpad_recovery b/rwlaunchpad/test/launchpad_recovery
new file mode 100755
index 0000000..eea5d4a
--- /dev/null
+++ b/rwlaunchpad/test/launchpad_recovery
@@ -0,0 +1,793 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import asyncio
+import logging
+import os
+import sys
+import unittest
+import re
+import psutil
+import types
+
+import xmlrunner
+
+import gi
+gi.require_version('RwDtsToyTaskletYang', '1.0')
+gi.require_version('RwManifestYang', '1.0')
+gi.require_version('RwVcsYang', '1.0')
+
+import gi.repository.RwManifestYang as rwmanifest
+import gi.repository.RwVcsYang as rwvcs
+import gi.repository.RwDtsToyTaskletYang as toyyang
+import gi.repository.RwYang as RwYang
+import rift.auto.session
+import rift.vcs.vcs
+
+import rift.tasklets
+import rift.test.dts
+
# Backwards compatibility: asyncio.ensure_future was added in 3.4.4.
# Use getattr() because "async" became a reserved keyword in Python 3.7,
# so referencing asyncio.async directly is a syntax error there and
# would prevent this module from even parsing.
if sys.version_info < (3, 4, 4):
    asyncio.ensure_future = getattr(asyncio, "async")
+
+class LaunchPad(rift.test.dts.AbstractDTSTest):
+    """
+    DTS GI interface unittests
+
+    Note:  Each tests uses a list of asyncio.Events for staging through the
+    test.  These are required here because we are bring up each coroutine
+    ("tasklet") at the same time and are not implementing any re-try
+    mechanisms.  For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries.  Such
+    event lists should not be used in production software.
+    """
+    def setUp(self):
+        """
+        1. Creates an asyncio loop
+        2. Triggers the hook configure_test
+        """
+        def scheduler_tick(self, *args):
+            self.call_soon(self.stop)
+            self.run_forever()
+
+        # Init params: loop & timers
+        self.loop = asyncio.new_event_loop()
+
+        self.loop.scheduler_tick = types.MethodType(scheduler_tick, self.loop)
+
+        self.asyncio_timer = None
+        self.stop_timer = None
+        self.__class__.id_cnt += 1
+        self.configure_test(self.loop, self.__class__.id_cnt)
+
+    @classmethod
+    def configure_schema(cls):
+        schema =  RwYang.Model.load_and_merge_schema(rwvcs.get_schema(), 'librwcal_yang_gen.so', 'Rwcal')
+        cls.model = RwYang.Model.create_libncx()
+        cls.model.load_schema_ypbc(schema)
+        xml = cls.manifest.to_xml_v2(cls.model, 1)
+        xml = re.sub('rw-manifest:', '', xml)
+        xml = re.sub('<manifest xmlns:rw-manifest="http://riftio.com/ns/riftware-1.0/rw-manifest">', '<?xml version="1.0" ?>\n<manifest xmlns="http://riftio.com/ns/riftware-1.0/rw-manifest" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://riftio.com/ns/riftware-1.0/rw-manifest ./rw-manifest.xsd">', xml)
+        xml = '\n'.join(xml.split('\n')[1:]) 
+        with open('lptestmanifest.xml', 'w') as f:
+           f.write(str(xml))
+        f.close()
+        return schema
+
+
+    @classmethod
+    def configure_manifest(cls):
+        manifest = rwmanifest.Manifest()
+        manifest.bootstrap_phase = rwmanifest.BootstrapPhase.from_dict({
+            "rwmgmt": {
+                "northbound_listing": [ "cli_launchpad_schema_listing.txt" ]
+            }, 
+            "rwtasklet": {
+                "plugin_name": "rwinit-c"
+            }, 
+            "rwtrace": {
+                "enable": True, 
+                "level": 5, 
+            }, 
+            "log": {
+                "enable": True, 
+                "severity": 4, 
+                "bootstrap_time": 30, 
+                "console_severity": 4
+            }, 
+            "ip_addrs_list": [
+                {
+                    "ip_addr": "127.0.0.1", 
+                }
+            ], 
+            "zookeeper": {
+                "master_ip": "127.0.0.1", 
+                "unique_ports": False, 
+                "zake": False
+            }, 
+            "serf": {
+                "start": True
+            }, 
+            "rwvm": {
+                "instances": [
+                    {
+                        "component_name": "msgbroker", 
+                        "config_ready": True
+                    }, 
+                    {
+                        "component_name": "dtsrouter", 
+                        "config_ready": True
+                    }
+                ]
+            }, 
+#           "rwsecurity": {
+#               "use_ssl": True, 
+#               "cert": "/net/mahi/localdisk/kelayath/ws/coreha/etc/ssl/current.cert", 
+#               "key": "/net/mahi/localdisk/kelayath/ws/coreha/etc/ssl/current.key"
+#           }
+        }) 
+        manifest.init_phase = rwmanifest.InitPhase.from_dict({
+            "environment": {
+                "python_variable": [
+                    "vm_ip_address = '127.0.0.1'",
+                    "rw_component_name = 'vm-launchpad'",
+                    "instance_id = 1",
+                    "component_type = 'rwvm'",
+                ], 
+                "component_name": "$python(rw_component_name)", 
+                "instance_id": "$python(instance_id)", 
+                "component_type": "$python(rw_component_type)"
+            }, 
+            "settings": {
+                "rwmsg": {
+                    "multi_broker": {
+                        "enable": False
+                    }
+                }, 
+                "rwdtsrouter": {
+                    "multi_dtsrouter": {
+                        "enable": True
+                    }
+                }, 
+                "rwvcs": {
+                    "collapse_each_rwvm": False, 
+                    "collapse_each_rwprocess": False
+                }
+            }
+        }) 
+        manifest.inventory = rwmanifest.Inventory.from_dict({
+            "component": [
+                {
+                    "component_name": "master", 
+                    "component_type": "RWCOLLECTION", 
+                    "rwcollection": {
+                        "collection_type": "rwcolony", 
+                        "event_list": {
+                            "event": [{
+                                "name": "onentry", 
+                                "action": [{
+                                    "name": "Start vm-launchpad for master", 
+                                    "start": {
+                                        "python_variable": ["vm_ip_address = '127.0.0.1'"], 
+                                        "component_name": "vm-launchpad", 
+                                        "instance_id": "1", 
+                                        "config_ready": True
+                                    }
+                                }]
+                            }]
+                        }
+                    }
+                }, 
+                {
+                    "component_name": "vm-launchpad", 
+                    "component_type": "RWVM", 
+                    "rwvm": {
+                        "leader": True, 
+                        "event_list": {
+                            "event": [{
+                                "name": "onentry", 
+                                "action": [
+                                    {
+                                        "name": "Start the master", 
+                                        "start": {
+                                            "component_name": "master", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+#                                   {
+#                                       "name": "Start the RW.CLI", 
+#                                       "start": {
+#                                           "component_name": "RW.CLI", 
+#                                           "recovery_action": "RESTART",
+#                                           "config_ready": True
+#                                       }
+#                                   }, 
+                                    {
+                                        "name": "Start the RW.Proc_1.Restconf", 
+                                        "start": {
+                                            "component_name": "RW.Proc_1.Restconf", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+#                                   {
+#                                       "name": "Start the RW.Proc_2.RestPortForward", 
+#                                       "start": {
+#                                           "component_name": "RW.Proc_2.RestPortForward", 
+#                                           "recovery_action": "RESTART",
+#                                           "config_ready": True
+#                                       }
+#                                   }, 
+                                    {
+                                        "name": "Start the RW.Proc_3.CalProxy", 
+                                        "start": {
+                                            "component_name": "RW.Proc_3.CalProxy", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.Proc_4.nfvi-metrics-monitor", 
+                                        "start": {
+                                            "component_name": "RW.Proc_4.nfvi-metrics-monitor", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.Proc_5.network-services-manager", 
+                                        "start": {
+                                            "component_name": "RW.Proc_5.network-services-manager", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.Proc_6.virtual-network-function-manager", 
+                                        "start": {
+                                            "component_name": "RW.Proc_6.virtual-network-function-manager", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.Proc_7.virtual-network-service", 
+                                        "start": {
+                                            "component_name": "RW.Proc_7.virtual-network-service", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.Proc_8.nfvi-metrics-monitor", 
+                                        "start": {
+                                            "component_name": "RW.Proc_8.nfvi-metrics-monitor", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.MC.UI", 
+                                        "start": {
+                                            "component_name": "RW.MC.UI", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+#                                   {
+#                                       "name": "Start the RW.COMPOSER.UI", 
+#                                       "start": {
+#                                           "component_name": "RW.COMPOSER.UI", 
+#                                           "config_ready": True
+#                                       }
+#                                   }, 
+                                    {
+                                        "name": "Start the RW.Proc_10.launchpad", 
+                                        "start": {
+                                            "component_name": "RW.Proc_10.launchpad", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.Proc_11.Resource-Manager", 
+                                        "start": {
+                                            "component_name": "RW.Proc_11.Resource-Manager", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.uAgent", 
+                                        "start": {
+                                            "python_variable": ["cmdargs_str = '--confd-proto AF_INET --confd-ip 127.0.0.1'"], 
+                                            "component_name": "RW.uAgent", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the logd", 
+                                        "start": {
+                                            "component_name": "logd", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }
+                                ]
+                            }]
+                        }
+                    }
+                }, 
+#               {
+#                   "component_name": "RW.CLI", 
+#                   "component_type": "PROC", 
+#                   "native_proc": {
+#                       "exe_path": "./usr/bin/rwcli", 
+#                       "args": "--netconf_host 127.0.0.1 --netconf_port 2022 --schema_listing cli_launchpad_schema_listing.txt", 
+#                   }
+#               }, 
+                {
+                    "component_name": "RW.Proc_1.Restconf", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start RW.Restconf for RW.Proc_1.Restconf", 
+                            "component_name": "RW.Restconf", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "RW.Restconf", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/restconf", 
+                        "plugin_name": "restconf"
+                    }
+                }, 
+#               {
+#                   "component_name": "RW.Proc_2.RestPortForward", 
+#                   "component_type": "RWPROC", 
+#                   "rwproc": {
+#                       "tasklet": [{
+#                           "name": "Start RW.RestPortForward for RW.Proc_2.RestPortForward", 
+#                           "component_name": "RW.RestPortForward", 
+#                           "recovery_action": "RESTART",
+#                           "config_ready": True
+#                       }]
+#                   }
+#               }, 
+#               {
+#                   "component_name": "RW.RestPortForward", 
+#                   "component_type": "RWTASKLET", 
+#                   "rwtasklet": {
+#                       "plugin_directory": "./usr/lib/rift/plugins/restportforward", 
+#                       "plugin_name": "restportforward"
+#                   }
+#               }, 
+                {
+                    "component_name": "RW.Proc_3.CalProxy", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start RW.CalProxy for RW.Proc_3.CalProxy", 
+                            "component_name": "RW.CalProxy", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "RW.CalProxy", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwcalproxytasklet", 
+                        "plugin_name": "rwcalproxytasklet"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_4.nfvi-metrics-monitor", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start nfvi-metrics-monitor for RW.Proc_4.nfvi-metrics-monitor", 
+                            "component_name": "nfvi-metrics-monitor", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "nfvi-metrics-monitor", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwmonitor", 
+                        "plugin_name": "rwmonitor"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_5.network-services-manager", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start network-services-manager for RW.Proc_5.network-services-manager", 
+                            "component_name": "network-services-manager", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "network-services-manager", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwnsmtasklet", 
+                        "plugin_name": "rwnsmtasklet"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_6.virtual-network-function-manager", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start virtual-network-function-manager for RW.Proc_6.virtual-network-function-manager", 
+                            "component_name": "virtual-network-function-manager", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "virtual-network-function-manager", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwvnfmtasklet", 
+                        "plugin_name": "rwvnfmtasklet"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_7.virtual-network-service", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start virtual-network-service for RW.Proc_7.virtual-network-service", 
+                            "component_name": "virtual-network-service", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "virtual-network-service", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwvnstasklet", 
+                        "plugin_name": "rwvnstasklet"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_8.nfvi-metrics-monitor", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start nfvi-metrics-monitor for RW.Proc_8.nfvi-metrics-monitor", 
+                            "component_name": "nfvi-metrics-monitor", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "RW.MC.UI", 
+                    "component_type": "PROC", 
+                    "native_proc": {
+                        "exe_path": "./usr/share/rw.ui/skyquake/scripts/launch_ui.sh", 
+                    }
+                },
+                {
+                    "component_name": "RW.COMPOSER.UI",
+                    "component_type": "PROC", 
+                    "native_proc": {
+                        "exe_path": "./usr/share/composer/scripts/launch_composer.sh",
+                    }
+                },
+                {
+                    "component_name": "RW.Proc_9.Configuration-Manager", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start Configuration-Manager for RW.Proc_9.Configuration-Manager", 
+                            "component_name": "Configuration-Manager", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "Configuration-Manager", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwconmantasklet", 
+                        "plugin_name": "rwconmantasklet"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_10.launchpad", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start launchpad for RW.Proc_10.launchpad", 
+                            "component_name": "launchpad", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "launchpad", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwlaunchpad", 
+                        "plugin_name": "rwlaunchpad"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_11.Resource-Manager", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start Resource-Manager for RW.Proc_11.Resource-Manager", 
+                            "component_name": "Resource-Manager", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "Resource-Manager", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwresmgrtasklet", 
+                        "plugin_name": "rwresmgrtasklet"
+                    }
+                }, 
+                {
+                    "component_name": "RW.uAgent", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwuagent-c", 
+                        "plugin_name": "rwuagent-c"
+                    }
+                }, 
+                {
+                    "component_name": "logd", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwlogd-c", 
+                        "plugin_name": "rwlogd-c"
+                    }
+                }, 
+                {
+                    "component_name": "msgbroker", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwmsgbroker-c", 
+                        "plugin_name": "rwmsgbroker-c"
+                    }
+                }, 
+                {
+                    "component_name": "dtsrouter", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwdtsrouter-c", 
+                        "plugin_name": "rwdtsrouter-c"
+                    }
+                }
+            ]
+        })
+        return manifest
+
+    def tearDown(self):
+        tasklist = { 'reaperd', 
+                     'rwlogd-report-c', 
+                     'launch_ui.sh' }
+        for proc in psutil.process_iter():
+          if proc.name() in tasklist:
+             print("killing", proc.name())
+             try:
+               proc.kill()
+             except:
+               print(proc.name(), "no longer exists")
+        self.loop.stop()
+        self.loop.close()
+
+
+class LaunchPadTest(LaunchPad):
+    """
+    DTS GI interface unittests
+
+    Note:  Each test uses a list of asyncio.Events for staging through the
+    test.  These are required here because we bring up each coroutine
+    ("tasklet") at the same time and are not implementing any re-try
+    mechanisms.  For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries.  Such
+    event lists should not be used in production software.
+    """
+    @asyncio.coroutine
+    def inventory(self):
+        # Read the VCS component info tree over DTS and flatten it into:
+        #   component_name -> (instance_id, parent, component_type, state)
+        res_iter = yield from self.dts_mgmt.query_read('/rw-base:vcs/rw-base:info', flags=0)
+        for i in res_iter:
+           info_result = yield from i
+        components = info_result.result.components.component_info
+        recvd_list = {}
+        for component in components:
+            recvd_list[component.component_name] = (component.instance_id, 
+                         component.rwcomponent_parent, 
+                         component.component_type,
+                         component.state)
+        return recvd_list
+
+    @asyncio.coroutine
+    def issue_vcrash(self, component_type):
+        # Crash (vcrash RPC) every non-critical component of the given type,
+        # then assert each one is not stuck in the TO_RECOVER state.
+#       critical_components = {'msgbroker', 'dtsrouter'}
+        critical_components = {'msgbroker', 'dtsrouter', 'RW.uAgent'}
+        comp_inventory = yield from self.inventory()
+        for component in comp_inventory:
+          if ((comp_inventory[component])[2] == component_type):
+              inst = (comp_inventory[component])[0]
+              if (component in critical_components):
+                  print(component, 'Marked as CRITICAL - Not restarting')
+              else:
+                  print('Crashing ', component_type,component)
+                  vcrash_input = rwvcs.VCrashInput(instance_name=component+'-'+str(inst))
+                  query_iter = yield from self.dts_mgmt.query_rpc( xpath="/rw-vcs:vcrash",
+                                                      flags=0, msg=vcrash_input)
+                  yield from asyncio.sleep(1, loop=self.loop)
+                  restarted_inventory = yield from self.inventory()
+                  self.assertTrue(restarted_inventory[component][3] != 'TO_RECOVER')
+
+    def test_launch_pad(self):
+        """
+        Verify the launchpad setup functions
+        The test will progress through stages defined by the events list:
+            0:  mission_control setup is brought up
+            1:  Tasklet/PROC/VM restarts tested to confirm recovery is proper
+        """
+
+        print("{{{{{{{{{{{{{{{{{{{{STARTING - mano recovery test")
+#       confd_host="127.0.0.1"
+
+        events = [asyncio.Event(loop=self.loop) for _ in range(2)]
+
+        @asyncio.coroutine
+        def sub():
+
+            tinfo = self.new_tinfo('sub')
+            self.dts_mgmt = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+            # Sleep for DTS registrations to complete
+            print('.........................................................')
+            print('........SLEEPING 80 seconds for system to come up........')
+            yield from asyncio.sleep(80, loop=self.loop)
+            print('........RESUMING........')
+
+            @asyncio.coroutine
+            def issue_vstop(component,inst,flag=0):
+                # Stop instance <component>-<inst> via the vstop RPC, then
+                # give the system a second to process the stop.
+                vstop_input = rwvcs.VStopInput(instance_name=component+'-'+(str(inst))) 
+                query_iter = yield from self.dts_mgmt.query_rpc( xpath="/rw-vcs:vstop",
+                                    flags=flag, msg=vstop_input)
+                yield from asyncio.sleep(1, loop=self.loop)
+
+
+
+            @asyncio.coroutine
+            def issue_vstart(component, parent, recover=False):
+                # (Re)start `component` under `parent` via the vstart RPC;
+                # recover=True asks VCS to treat it as a recovery start.
+                vstart_input = rwvcs.VStartInput()
+                vstart_input.component_name = component
+                vstart_input.parent_instance = parent
+                vstart_input.recover = recover
+                query_iter = yield from self.dts_mgmt.query_rpc( xpath="/rw-vcs:vstart",
+                                                      flags=0, msg=vstart_input)
+                yield from asyncio.sleep(1, loop=self.loop)
+
+            @asyncio.coroutine
+            def issue_start_stop(comp_inventory, component_type):
+                # Stop then recover-start every non-critical component of the
+                # given type, asserting each one leaves the TO_RECOVER state.
+#               critical_components = {'msgbroker', 'dtsrouter'}
+                critical_components = {'msgbroker', 'dtsrouter', 'RW.uAgent'}
+                for component in comp_inventory:
+                  if ((comp_inventory[component])[2] == component_type):
+                      inst = (comp_inventory[component])[0]
+                      parent = (comp_inventory[component])[1]
+                      if (component in critical_components):
+                          print(component, 'Marked as CRITICAL - Not restarting')
+                      else:
+                          print('Stopping ', component_type,component)
+                          yield from issue_vstop(component,inst)
+                          restarted_inventory = yield from self.inventory()
+#                         self.assertEqual(restarted_inventory[component][3],'TO_RECOVER')
+                          print('Starting ',component_type,component)
+                          yield from issue_vstart(component, parent, recover=True)
+                          restarted_inventory = yield from self.inventory()
+                          self.assertTrue(restarted_inventory[component][3] != 'TO_RECOVER')
+
+            yield from asyncio.sleep(20, loop=self.loop)
+            comp_inventory = yield from self.inventory()
+            yield from issue_start_stop(comp_inventory, 'RWTASKLET')
+#           yield from issue_start_stop(comp_inventory, 'RWPROC')
+#           yield from self.issue_vcrash('RWTASKLET')
+
+            yield from asyncio.sleep(20, loop=self.loop)
+            restarted_inventory = yield from self.inventory()
+#           critical_components = {'msgbroker', 'dtsrouter', 'RW.uAgent'}
+            # The post-restart inventory must match the original exactly.
+            for comp in comp_inventory:
+                self.assertEqual(str(comp_inventory[comp]), str(restarted_inventory[comp])) 
+#               if (comp not in critical_components):
+#                   inst = (comp_inventory[comp])[0]
+#                   yield from issue_vstop(comp,inst)
+
+            events[1].set()
+
+        # NOTE(review): events[0] is never set or awaited; only events[1]
+        # gates completion — confirm whether the first stage event is needed.
+        asyncio.ensure_future(sub(), loop=self.loop)
+        self.run_until(events[1].is_set, timeout=260)
+
+
+def main():
+    plugin_dir = os.path.join(os.environ["RIFT_INSTALL"], "usr/lib/rift/plugins")
+    if 'DTS_TEST_PUB_DIR' not in os.environ:
+        os.environ['DTS_TEST_PUB_DIR'] = os.path.join(plugin_dir, 'dtstestpub')
+
+    if 'RIFT_NO_SUDO_REAPER' not in os.environ:
+        os.environ['RIFT_NO_SUDO_REAPER'] = '1'
+
+    if 'MESSAGE_BROKER_DIR' not in os.environ:
+        os.environ['MESSAGE_BROKER_DIR'] = os.path.join(plugin_dir, 'rwmsgbroker-c')
+
+    if 'ROUTER_DIR' not in os.environ:
+        os.environ['ROUTER_DIR'] = os.path.join(plugin_dir, 'rwdtsrouter-c')
+
+    if 'RW_VAR_RIFT' not in os.environ:
+        os.environ['RW_VAR_RIFT'] = '1'
+    
+    if 'INSTALLDIR' in os.environ:
+        os.chdir(os.environ.get('INSTALLDIR')) 
+
+#   if 'RWMSG_BROKER_SHUNT' not in os.environ:
+#       os.environ['RWMSG_BROKER_SHUNT'] = '1'
+
+    if 'TEST_ENVIRON' not in os.environ:
+        os.environ['TEST_ENVIRON'] = '1'
+
+    if 'RW_MANIFEST' not in os.environ:
+        os.environ['RW_MANIFEST'] = os.path.join(install_dir, 'lptestmanifest.xml')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    args, _ = parser.parse_known_args()
+
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    unittest.main(testRunner=runner)
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw=4
diff --git a/rwlaunchpad/test/mano_error_ut.py b/rwlaunchpad/test/mano_error_ut.py
new file mode 100755
index 0000000..e593cee
--- /dev/null
+++ b/rwlaunchpad/test/mano_error_ut.py
@@ -0,0 +1,898 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import asyncio
+import logging
+import os
+import sys
+import time
+import unittest
+import uuid
+
+import xmlrunner
+
+import gi.repository.RwDts as rwdts
+import gi.repository.RwNsmYang as rwnsmyang
+import gi.repository.RwResourceMgrYang as RwResourceMgrYang
+import gi.repository.RwLaunchpadYang as launchpadyang
+import rift.tasklets
+import rift.test.dts
+
+import mano_ut
+
+
+# asyncio.ensure_future was introduced in Python 3.4.4; on older
+# interpreters fall back to the legacy asyncio.async alias.
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class OutOfResourceError(Exception):
+    """Raised when a mock resource pool has no free handlers left to
+    satisfy an allocation request."""
+    pass
+
+
+class ComputeResourceRequestMockEventHandler(object):
+    def __init__(self):
+        self._pool_name = "vm_pool"
+        self._vdu_id = str(uuid.uuid4())
+        self._vdu_info = {
+                "vdu_id": self._vdu_id,
+                "state": "active",
+                "management_ip": "1.1.1.1",
+                "public_ip": "1.1.1.1",
+                "connection_points": [],
+                }
+
+        self._resource_state = "active"
+
+        self._event_id = None
+        self._request_info = None
+
+    def allocate(self, event_id, request_info):
+        self._event_id = event_id
+        self._request_info = request_info
+
+        self._vdu_info.update({
+            "name": self._request_info.name,
+            "flavor_id": self._request_info.flavor_id,
+            "image_id": self._request_info.image_id,
+            })
+
+        for cp in request_info.connection_points:
+            info_cp = dict(
+                name=cp.name,
+                virtual_link_id=cp.virtual_link_id,
+                vdu_id=self._vdu_id,
+                state="active",
+                ip_address="1.2.3.4",
+                )
+            info_cp = self._vdu_info["connection_points"].append(info_cp)
+
+    @property
+    def event_id(self):
+        return self._event_id
+
+    @property
+    def resource_state(self):
+        return self._resource_state
+
+    def set_active(self):
+        self._resource_state = "active"
+
+    def set_failed(self):
+        self._resource_state = "failed"
+
+    def set_pending(self):
+        self._resource_state = "pending"
+
+    @property
+    def response_msg(self):
+        resource_info = dict(
+                pool_name=self._pool_name,
+                resource_state=self.resource_state,
+                )
+        resource_info.update(self._vdu_info)
+
+        response = RwResourceMgrYang.VDUEventData.from_dict(dict(
+            event_id=self._event_id,
+            request_info=self._request_info.as_dict(),
+            resource_info=resource_info,
+            ))
+
+        return response.resource_info
+
+
+class NetworkResourceRequestMockEventHandler(object):
+    def __init__(self):
+        self._pool_name = "network_pool"
+        self._link_id = str(uuid.uuid4())
+        self._link_info = {
+                "virtual_link_id": self._link_id,
+                "state": "active",
+                }
+
+        self._resource_state = "active"
+
+        self._event_id = None
+        self._request_info = None
+
+    def allocate(self, event_id, request_info):
+        self._event_id = event_id
+        self._request_info = request_info
+
+        self._link_info.update({
+            "name": self._request_info.name,
+            "subnet": self._request_info.subnet,
+            })
+
+    @property
+    def event_id(self):
+        return self._event_id
+
+    @property
+    def resource_state(self):
+        return self._resource_state
+
+    def set_active(self):
+        self._resource_state = "active"
+
+    def set_failed(self):
+        self._resource_state = "failed"
+
+    def set_pending(self):
+        self._resource_state = "pending"
+
+    @property
+    def response_msg(self):
+        resource_info = dict(
+                pool_name=self._pool_name,
+                resource_state=self.resource_state,
+                )
+        resource_info.update(self._link_info)
+
+        response = RwResourceMgrYang.VirtualLinkEventData.from_dict(dict(
+            event_id=self._event_id,
+            request_info=self._request_info.as_dict(),
+            resource_info=resource_info,
+            ))
+
+        return response.resource_info
+
+
+class ResourceMgrMock(object):
+    """DTS mock of the resource-manager tasklet.
+
+    Registers as publisher on the vdu-event and vlink-event xpaths and
+    serves create/read/delete requests from pre-provisioned mock event
+    handlers (see create_compute_mock_event_handler and
+    create_network_mock_event_handler).
+    """
+    VDU_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data"
+    VLINK_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
+
+    def __init__(self, dts, log, loop):
+        self._log = log
+        self._dts = dts
+        self._loop = loop
+        # DTS registration handles, populated by register().
+        self._vdu_reg = None
+        self._link_reg = None
+
+        # Set when the corresponding registration reports ready.
+        self._vdu_reg_event = asyncio.Event(loop=self._loop)
+        self._link_reg_event = asyncio.Event(loop=self._loop)
+
+        # Handlers waiting to serve an allocation request.
+        self._available_compute_handlers = []
+        self._available_network_handlers = []
+
+        # event_id -> handler for currently allocated resources.
+        self._used_compute_handlers = {}
+        self._used_network_handlers = {}
+
+        # Total allocate requests seen (including failed ones).
+        self._compute_allocate_requests = 0
+        self._network_allocate_requests = 0
+
+        self._registered = False
+
+    def _allocate_virtual_compute(self, event_id, request_info):
+        # Serve a compute allocation from the available-handler pool.
+        # Raises OutOfResourceError when the pool is exhausted.
+        self._compute_allocate_requests += 1
+
+        if not self._available_compute_handlers:
+            raise OutOfResourceError("No more compute handlers")
+
+        handler = self._available_compute_handlers.pop()
+        handler.allocate(event_id, request_info)
+        self._used_compute_handlers[event_id] = handler
+
+        return handler.response_msg
+
+    def _allocate_virtual_network(self, event_id, request_info):
+        # Serve a network allocation from the available-handler pool.
+        # Raises OutOfResourceError when the pool is exhausted.
+        self._network_allocate_requests += 1
+
+        if not self._available_network_handlers:
+            raise OutOfResourceError("No more network handlers")
+
+        handler = self._available_network_handlers.pop()
+        handler.allocate(event_id, request_info)
+        self._used_network_handlers[event_id] = handler
+
+        return handler.response_msg
+
+    def _release_virtual_network(self, event_id):
+        # Note: released handlers are discarded, not returned to the pool.
+        del self._used_network_handlers[event_id]
+
+    def _release_virtual_compute(self, event_id):
+        # Note: released handlers are discarded, not returned to the pool.
+        del self._used_compute_handlers[event_id]
+
+    def _read_virtual_network(self, event_id):
+        # KeyError if event_id was never allocated or already released.
+        return self._used_network_handlers[event_id].response_msg
+
+    def _read_virtual_compute(self, event_id):
+        # KeyError if event_id was never allocated or already released.
+        return self._used_compute_handlers[event_id].response_msg
+
+    @asyncio.coroutine
+    def on_link_request_prepare(self, xact_info, action, ks_path, request_msg):
+        """DTS prepare callback for virtual-link create/read/delete."""
+        if not self._registered:
+            self._log.error("Got a prepare callback when not registered!")
+            xact_info.respond_xpath(rwdts.XactRspCode.NA)
+            return
+
+        self._log.debug("Received virtual-link on_prepare callback (self: %s, xact_info: %s, action: %s): %s",
+                        self, xact_info, action, request_msg)
+
+        response_info = None
+        response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+
+        # Extract the event-id key from the keyspec path.
+        schema = RwResourceMgrYang.VirtualLinkEventData().schema()
+        pathentry = schema.keyspec_to_entry(ks_path)
+
+        if action == rwdts.QueryAction.CREATE:
+            response_info = self._allocate_virtual_network(
+                    pathentry.key00.event_id,
+                    request_msg.request_info,
+                    )
+
+        elif action == rwdts.QueryAction.DELETE:
+            self._release_virtual_network(pathentry.key00.event_id)
+
+        elif action == rwdts.QueryAction.READ:
+            response_info = self._read_virtual_network(
+                    pathentry.key00.event_id
+                    )
+        else:
+            raise ValueError("Only read/create/delete actions available. Received action: %s" %(action))
+
+        self._log.debug("Responding with VirtualLinkInfo at xpath %s: %s.",
+                        response_xpath, response_info)
+
+        xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info)
+
+    @asyncio.coroutine
+    def on_vdu_request_prepare(self, xact_info, action, ks_path, request_msg):
+        """DTS prepare callback for VDU create/read/delete.
+
+        A CREATE that returns a 'pending' resource spawns a background
+        monitor that publishes an update once the VDU reaches a terminal
+        state ('active' or 'failed').
+        """
+        if not self._registered:
+            self._log.error("Got a prepare callback when not registered!")
+            xact_info.respond_xpath(rwdts.XactRspCode.NA)
+            return
+
+        @asyncio.coroutine
+        def monitor_vdu_state(response_xpath, pathentry):
+            # Poll the allocated VDU once per second (up to 120 tries) and
+            # publish its info when it reaches a terminal state.
+            self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath)
+            loop_cnt = 120
+            while loop_cnt > 0:
+                self._log.debug("VDU state monitoring: Sleeping for 1 second ")
+                yield from asyncio.sleep(1, loop = self._loop)
+                try:
+                    response_info = self._read_virtual_compute(
+                            pathentry.key00.event_id
+                            )
+                except Exception as e:
+                    self._log.error(
+                            "VDU state monitoring: Received exception %s "
+                            "in VDU state monitoring for %s. Aborting monitoring",
+                            str(e), response_xpath
+                            )
+                    raise
+
+                if response_info.resource_state == 'active' or response_info.resource_state == 'failed':
+                    self._log.info(
+                            "VDU state monitoring: VDU reached terminal state."
+                            "Publishing VDU info: %s at path: %s",
+                            response_info, response_xpath
+                            )
+                    yield from self._dts.query_update(response_xpath,
+                                                      rwdts.XactFlag.ADVISE,
+                                                      response_info)
+                    return
+                else:
+                    loop_cnt -= 1
+
+            ### End of while loop. This is only possible if VDU did not reach active state
+            # Timed out: publish a synthetic 'failed' resource-info.
+            self._log.info("VDU state monitoring: VDU at xpath :%s did not reached active state in 120 seconds. Aborting monitoring",
+                           response_xpath)
+            response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+            response_info.resource_state = 'failed'
+            yield from self._dts.query_update(response_xpath,
+                                              rwdts.XactFlag.ADVISE,
+                                              response_info)
+            return
+
+        self._log.debug("Received vdu on_prepare callback (xact_info: %s, action: %s): %s",
+                        xact_info, action, request_msg)
+
+        response_info = None
+        response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+
+        # Extract the event-id key from the keyspec path.
+        schema = RwResourceMgrYang.VDUEventData().schema()
+        pathentry = schema.keyspec_to_entry(ks_path)
+
+        if action == rwdts.QueryAction.CREATE:
+            response_info = self._allocate_virtual_compute(
+                    pathentry.key00.event_id,
+                    request_msg.request_info,
+                    )
+            if response_info.resource_state == 'pending':
+                asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry),
+                                      loop = self._loop)
+
+        elif action == rwdts.QueryAction.DELETE:
+            self._release_virtual_compute(
+                    pathentry.key00.event_id
+                    )
+
+        elif action == rwdts.QueryAction.READ:
+            response_info = self._read_virtual_compute(
+                    pathentry.key00.event_id
+                    )
+        else:
+            raise ValueError("Only create/delete actions available. Received action: %s" %(action))
+
+        self._log.debug("Responding with VDUInfo at xpath %s: %s",
+                        response_xpath, response_info)
+
+        xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info)
+
+    @asyncio.coroutine
+    def register(self):
+        """Register as DTS publisher for the link and VDU request xpaths."""
+        @asyncio.coroutine
+        def on_request_ready(registration, status):
+            # Signal the matching ready-event so wait_ready() can proceed.
+            self._log.debug("Got request ready event (registration: %s) (status: %s)",
+                            registration, status)
+
+            if registration == self._link_reg:
+                self._link_reg_event.set()
+            elif registration == self._vdu_reg:
+                self._vdu_reg_event.set()
+            else:
+                self._log.error("Unknown registration ready event: %s", registration)
+
+
+        with self._dts.group_create() as group:
+            self._log.debug("Registering for Link Resource Request using xpath: %s",
+                            ResourceMgrMock.VLINK_REQUEST_XPATH)
+
+            self._link_reg = group.register(
+                    xpath=ResourceMgrMock.VLINK_REQUEST_XPATH,
+                    handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+                                                                  on_prepare=self.on_link_request_prepare),
+                    flags=rwdts.Flag.PUBLISHER)
+
+            self._log.debug("Registering for VDU Resource Request using xpath: %s",
+                            ResourceMgrMock.VDU_REQUEST_XPATH)
+
+            self._vdu_reg = group.register(
+                    xpath=ResourceMgrMock.VDU_REQUEST_XPATH,
+                    handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+                                                                  on_prepare=self.on_vdu_request_prepare),
+                    flags=rwdts.Flag.PUBLISHER)
+
+        self._registered = True
+
+    def unregister(self):
+        """Deregister both DTS registrations; prepare callbacks then NA."""
+        self._link_reg.deregister()
+        self._vdu_reg.deregister()
+        self._registered = False
+
+    @asyncio.coroutine
+    def wait_ready(self, timeout=5):
+        """Wait (up to timeout seconds) for both registrations to be ready."""
+        self._log.debug("Waiting for all request registrations to become ready.")
+        yield from asyncio.wait([self._link_reg_event.wait(), self._vdu_reg_event.wait()],
+                                timeout=timeout, loop=self._loop)
+
+    def create_compute_mock_event_handler(self):
+        # Provision one compute handler for a future allocation request.
+        handler = ComputeResourceRequestMockEventHandler()
+        self._available_compute_handlers.append(handler)
+
+        return handler
+
+    def create_network_mock_event_handler(self):
+        # Provision one network handler for a future allocation request.
+        handler = NetworkResourceRequestMockEventHandler()
+        self._available_network_handlers.append(handler)
+
+        return handler
+
+    @property
+    def num_compute_requests(self):
+        # Count of compute allocate attempts, successful or not.
+        return self._compute_allocate_requests
+
+    @property
+    def num_network_requests(self):
+        # Count of network allocate attempts, successful or not.
+        return self._network_allocate_requests
+
+    @property
+    def num_allocated_compute_resources(self):
+        return len(self._used_compute_handlers)
+
+    @property
+    def num_allocated_network_resources(self):
+        return len(self._used_network_handlers)
+
+
+@unittest.skip('failing and needs rework')
+class ManoErrorTestCase(rift.test.dts.AbstractDTSTest):
+    """
+    DTS GI interface unittests
+
+    Note:  Each tests uses a list of asyncio.Events for staging through the
+    test.  These are required here because we are bring up each coroutine
+    ("tasklet") at the same time and are not implementing any re-try
+    mechanisms.  For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries.  Such
+    event lists should not be used in production software.
+    """
+
+    @classmethod
+    def configure_suite(cls, rwmain):
+        plugin_dir = os.path.join(os.environ["RIFT_INSTALL"], "usr/lib/rift/plugins")
+        rwmain.add_tasklet(
+                os.path.join(plugin_dir, 'rwvns'),
+                'rwvnstasklet'
+                )
+
+        rwmain.add_tasklet(
+                os.path.join(plugin_dir, 'rwvnfm'),
+                'rwvnfmtasklet'
+                )
+
+        rwmain.add_tasklet(
+                os.path.join(plugin_dir, 'rwnsm'),
+                'rwnsmtasklet'
+                )
+
+        cls.waited_for_tasklets = False
+
+    @asyncio.coroutine
+    def register_mock_res_mgr(self):
+        self.res_mgr = ResourceMgrMock(
+                self.dts,
+                self.log,
+                self.loop,
+                )
+        yield from self.res_mgr.register()
+
+        self.log.info("Waiting for resource manager to be ready")
+        yield from self.res_mgr.wait_ready()
+
+    def unregister_mock_res_mgr(self):
+        self.res_mgr.unregister()
+
+    @classmethod
+    def configure_schema(cls):
+        return rwnsmyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    @asyncio.coroutine
+    def wait_tasklets(self):
+        if not ManoErrorTestCase.waited_for_tasklets:
+            yield from asyncio.sleep(5, loop=self.loop)
+            ManoErrorTestCase.waited_for_tasklets = True
+
+    @asyncio.coroutine
+    def publish_desciptors(self, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
+        yield from self.ping_pong.publish_desciptors(
+                num_external_vlrs,
+                num_internal_vlrs,
+                num_ping_vms
+                )
+
+    def unpublish_descriptors(self):
+        self.ping_pong.unpublish_descriptors()
+
+    @asyncio.coroutine
+    def wait_until_nsr_active_or_failed(self, nsr_id, timeout_secs=20):
+        start_time = time.time()
+        while (time.time() - start_time) < timeout_secs:
+            nsrs = yield from self.querier.get_nsr_opdatas(nsr_id)
+            self.assertEqual(1, len(nsrs))
+            if nsrs[0].operational_status in ['running', 'failed']:
+                return
+
+            self.log.debug("Rcvd NSR with %s status", nsrs[0].operational_status)
+            yield from asyncio.sleep(2, loop=self.loop)
+
+        self.assertIn(nsrs[0].operational_status, ['running', 'failed'])
+
+    def verify_number_compute_requests(self, num_requests):
+        self.assertEqual(num_requests, self.res_mgr.num_compute_requests)
+
+    def verify_number_network_requests(self, num_requests):
+        # Assert total network allocation requests seen by the mock res mgr.
+        self.assertEqual(num_requests, self.res_mgr.num_network_requests)
+
+    def verify_number_allocated_compute(self, num_allocated):
+        # Assert how many compute resources are currently held allocated.
+        self.assertEqual(num_allocated, self.res_mgr.num_allocated_compute_resources)
+
+    def verify_number_allocated_network(self, num_allocated):
+        # Assert how many network resources are currently held allocated.
+        self.assertEqual(num_allocated, self.res_mgr.num_allocated_network_resources)
+
+    def allocate_network_handlers(self, num_networks):
+        # Arm the mock resource manager with `num_networks` network event
+        # handlers.  The failure tests below rely on requests beyond the
+        # armed count being rejected -- presumably that is the mock's
+        # behavior; confirm in ResourceMgrMock.
+        return [self.res_mgr.create_network_mock_event_handler() for _ in range(num_networks)]
+
+    def allocate_compute_handlers(self, num_computes):
+        # Arm the mock resource manager with `num_computes` compute event
+        # handlers (see allocate_network_handlers for the failure semantics).
+        return [self.res_mgr.create_compute_mock_event_handler() for _ in range(num_computes)]
+
+    @asyncio.coroutine
+    def create_mock_launchpad_tasklet(self):
+        # Start the mock launchpad tasklet needed by the VDU-level tests.
+        # NOTE(review): the created tasklet is neither returned nor stored --
+        # presumably registering it on the DTS is sufficient; confirm in
+        # mano_ut.create_mock_launchpad_tasklet.
+        yield from mano_ut.create_mock_launchpad_tasklet(self.log, self.dts)
+
+    def configure_test(self, loop, test_id):
+        # Per-test setup hook: create a fresh tasklet info, DTS instance,
+        # ping-pong descriptor publisher and query helper for every test.
+        self.log.debug("STARTING - %s", self.id())
+        self.tinfo = self.new_tinfo(self.id())
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+        self.ping_pong = mano_ut.PingPongDescriptorPublisher(self.log, self.loop, self.dts)
+        self.querier = mano_ut.ManoQuerier(self.log, self.dts)
+
+        # Add a task to wait for tasklets to come up
+        asyncio.ensure_future(self.wait_tasklets(), loop=self.loop)
+
+    @rift.test.dts.async_test
+    def test_fail_first_nsm_vlr(self):
+        """NS with two external VLRs and no network handlers armed: the first
+        NS-level VLR allocation fails, the NSR ends 'failed' with no VNFRs
+        instantiated, and terminating the NSR releases everything."""
+        yield from self.publish_desciptors(num_external_vlrs=2)
+        yield from self.register_mock_res_mgr()
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(1)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 2)
+        yield from self.verify_num_vnfrs(0)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "failed")
+
+        self.verify_number_network_requests(1)
+        self.verify_number_compute_requests(0)
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        # After termination nothing should remain instantiated or allocated.
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_second_nsm_vlr(self):
+        """NS with two external VLRs and one network handler armed: the first
+        NS-level VLR comes up 'running', the second one fails, and the NSR
+        ends 'failed'.  Termination must release the one allocation."""
+        yield from self.publish_desciptors(num_external_vlrs=2)
+        yield from self.register_mock_res_mgr()
+        self.allocate_network_handlers(1)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(2)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 2)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+        yield from self.verify_vlr_state(nsr_vlrs[1], "failed")
+
+        self.verify_number_network_requests(2)
+        self.verify_number_compute_requests(0)
+        self.verify_number_allocated_network(1)
+        self.verify_number_allocated_compute(0)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_first_vnf_first_vlr(self):
+        """Two internal VLRs per VNF, one network handler armed (consumed by
+        the NS-level VLR): the first VNF's first internal VLR fails, so only
+        one VNFR is instantiated and it ends 'failed'."""
+        yield from self.publish_desciptors(num_internal_vlrs=2)
+        yield from self.register_mock_res_mgr()
+        self.allocate_network_handlers(1)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(2)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify only a single vnfr was instantiated and is failed
+        yield from self.verify_num_vnfrs(1)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "failed")
+
+        self.verify_number_network_requests(2)
+        self.verify_number_compute_requests(0)
+        self.verify_number_allocated_network(1)
+        self.verify_number_allocated_compute(0)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_first_vnf_second_vlr(self):
+        """Two internal VLRs per VNF, two network handlers armed: the NS-level
+        VLR and the first VNF's first internal VLR run; its second internal
+        VLR fails, leaving a single failed VNFR."""
+        yield from self.publish_desciptors(num_internal_vlrs=2)
+        yield from self.register_mock_res_mgr()
+        self.allocate_network_handlers(2)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(3)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify only a single vnfr was instantiated and is failed
+        yield from self.verify_num_vnfrs(1)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "failed")
+
+        self.verify_number_network_requests(3)
+        self.verify_number_compute_requests(0)
+        self.verify_number_allocated_network(2)
+        self.verify_number_allocated_compute(0)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_first_vnf_first_vdu(self):
+        """Three network handlers, no compute handlers: all VLRs run, but the
+        first VNF's first VDU compute allocation fails, so the single
+        instantiated VNFR ends 'failed'."""
+        yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2)
+        yield from self.register_mock_res_mgr()
+        yield from self.create_mock_launchpad_tasklet()
+        self.allocate_network_handlers(3)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(3)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify only a single vnfr was instantiated and is failed
+        yield from self.verify_num_vnfrs(1)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "running")
+
+        yield from self.verify_num_vnfr_vdus(nsr_vnfs[0], 2)
+        vdus = yield from self.get_vnf_vdus(nsr_vnfs[0])
+        self.verify_vdu_state(vdus[0], "failed")
+
+        self.verify_number_network_requests(3)
+        self.verify_number_compute_requests(1)
+        self.verify_number_allocated_network(3)
+        self.verify_number_allocated_compute(0)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_first_vnf_second_vdu(self):
+        """Three network + one compute handler: the first VNF's first VDU
+        runs, the second VDU's compute allocation fails, so the single
+        instantiated VNFR ends 'failed'."""
+        yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2)
+        yield from self.register_mock_res_mgr()
+        yield from self.create_mock_launchpad_tasklet()
+        self.allocate_network_handlers(3)
+        self.allocate_compute_handlers(1)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(3)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify only a single vnfr was instantiated and is failed
+        yield from self.verify_num_vnfrs(1)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "running")
+
+        yield from self.verify_num_vnfr_vdus(nsr_vnfs[0], 2)
+
+        vdus = yield from self.get_vnf_vdus(nsr_vnfs[0])
+        self.verify_vdu_state(vdus[0], "running")
+        self.verify_vdu_state(vdus[1], "failed")
+
+        self.verify_number_network_requests(3)
+        self.verify_number_compute_requests(2)
+        self.verify_number_allocated_network(3)
+        self.verify_number_allocated_compute(1)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_second_vnf_second_vdu(self):
+        """Five network + three compute handlers: the first VNF comes up
+        fully; the second VNF's second VDU compute allocation fails, so the
+        second VNFR ends 'failed'."""
+        yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2)
+        yield from self.register_mock_res_mgr()
+        yield from self.create_mock_launchpad_tasklet()
+        self.allocate_network_handlers(5)
+        self.allocate_compute_handlers(3)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(5)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify both vnfrs were instantiated; the first is running and the
+        # second failed (comment previously said "only a single vnfr").
+        yield from self.verify_num_vnfrs(2)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "running")
+        yield from self.verify_vnf_state(nsr_vnfs[1], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "running")
+
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[1])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "running")
+
+        yield from self.verify_num_vnfr_vdus(nsr_vnfs[0], 2)
+        yield from self.verify_num_vnfr_vdus(nsr_vnfs[1], 2)
+
+        vdus = yield from self.get_vnf_vdus(nsr_vnfs[0])
+        self.verify_vdu_state(vdus[0], "running")
+        self.verify_vdu_state(vdus[1], "running")
+
+        vdus = yield from self.get_vnf_vdus(nsr_vnfs[1])
+        self.verify_vdu_state(vdus[0], "running")
+        self.verify_vdu_state(vdus[1], "failed")
+
+        self.verify_number_network_requests(5)
+        self.verify_number_compute_requests(4)
+        self.verify_number_allocated_network(5)
+        self.verify_number_allocated_compute(3)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+
+def main():
+    # Entry point: run the test case under an XML (JUnit-style) runner.
+    # NOTE(review): os.environ["RIFT_MODULE_TEST"] raises KeyError when the
+    # env var is unset -- presumably always set by the rift build env.
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    # Only consume -v/--verbose; remaining args are left for unittest.main.
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    args, _ = parser.parse_known_args()
+
+    ManoErrorTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner)
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw
diff --git a/rwlaunchpad/test/mano_ut.py b/rwlaunchpad/test/mano_ut.py
new file mode 100755
index 0000000..69a0d40
--- /dev/null
+++ b/rwlaunchpad/test/mano_ut.py
@@ -0,0 +1,1198 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import os
+import sys
+import unittest
+import uuid
+import xmlrunner
+import argparse
+import logging
+import time
+import types
+
+import gi
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('NsrYang', '1.0')
+gi.require_version('RwlogMgmtYang', '1.0')
+
+from gi.repository import (
+    RwCloudYang as rwcloudyang,
+    RwDts as rwdts,
+    RwLaunchpadYang as launchpadyang,
+    RwNsmYang as rwnsmyang,
+    RwNsrYang as rwnsryang,
+    NsrYang as nsryang,
+    RwResourceMgrYang as rmgryang,
+    RwcalYang as rwcalyang,
+    RwConfigAgentYang as rwcfg_agent,
+    RwlogMgmtYang
+)
+
+from gi.repository.RwTypes import RwStatus
+import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
+import rift.tasklets
+import rift.test.dts
+import rw_peas
+
+
+# Hard-coded lab OpenStack account/endpoint details.
+# NOTE(review): not referenced within this view -- verify whether later code
+# uses it or it is leftover configuration.
+openstack_info = {
+        'username': 'pluto',
+        'password': 'mypasswd',
+        'auth_url': 'http://10.66.4.27:5000/v3/',
+        'project_name': 'demo',
+        'mgmt_network': 'private',
+        }
+
+
+# asyncio.ensure_future only exists from Python 3.4.4; fall back to the
+# deprecated asyncio.async alias on older interpreters.
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class XPaths(object):
+    @staticmethod
+    def nsd(k=None):
+        return ("C,/nsd:nsd-catalog/nsd:nsd" +
+                ("[nsd:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def vld(k=None):
+        return ("C,/vld:vld-catalog/vld:vld" +
+                ("[vld:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def vnfd(k=None):
+        return ("C,/vnfd:vnfd-catalog/vnfd:vnfd" +
+                ("[vnfd:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def vnfr(k=None):
+        return ("D,/vnfr:vnfr-catalog/vnfr:vnfr" +
+                ("[vnfr:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def vlr(k=None):
+        return ("D,/vlr:vlr-catalog/vlr:vlr" +
+                ("[vlr:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def nsd_ref_count(k=None):
+        return ("D,/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" +
+                ("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def vnfd_ref_count(k=None):
+        return ("D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" +
+                ("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def nsr_config(k=None):
+        return ("C,/nsr:ns-instance-config/nsr:nsr" +
+                ("[nsr:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def nsr_opdata(k=None):
+        return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+                ("[nsr:ns-instance-config-ref='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def nsr_config_status(k=None):
+        return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+                ("[nsr:ns-instance-config-ref='{}']/config_status".format(k) if k is not None else ""))
+
+    @staticmethod
+    def cm_state(k=None):
+        if k is None:
+            return ("D,/rw-conman:cm-state/rw-conman:cm-nsr")
+        else:
+            return ("D,/rw-conman:cm-state/rw-conman:cm-nsr" +
+                    ("[rw-conman:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def nsr_scale_group_instance(nsr_id=None, group_name=None, index=None):
+        return (("D,/nsr:ns-instance-opdata/nsr:nsr") +
+                ("[nsr:ns-instance-config-ref='{}']".format(nsr_id) if nsr_id is not None else "") +
+                ("/nsr:scaling-group-record") +
+                ("[nsr:scaling-group-name-ref='{}']".format(group_name) if group_name is not None else "") +
+                ("/nsr:instance") +
+                ("[nsr:scaling-group-index-ref='{}']".format(index) if index is not None else ""))
+
+    @staticmethod
+    def nsr_scale_group_instance_config(nsr_id=None, group_name=None, index=None):
+        return (("C,/nsr:ns-instance-config/nsr:nsr") +
+                ("[nsr:id='{}']".format(nsr_id) if nsr_id is not None else "") +
+                ("/nsr:scaling-group") +
+                ("[nsr:scaling-group-name-ref='{}']".format(group_name) if group_name is not None else "") +
+                ("/nsr:instance") +
+                ("[nsr:index='{}']".format(index) if index is not None else ""))
+
+
+class ManoQuerier(object):
+    def __init__(self, log, dts):
+        self.log = log
+        self.dts = dts
+
+    @asyncio.coroutine
+    def _read_query(self, xpath, do_trace=False):
+        self.log.debug("Running XPATH read query: %s (trace: %s)", xpath, do_trace)
+        flags = rwdts.XactFlag.MERGE
+        flags += rwdts.XactFlag.TRACE if do_trace else 0
+        res_iter = yield from self.dts.query_read(
+                xpath, flags=flags
+                )
+
+        results = []
+        for i in res_iter:
+            result = yield from i
+            if result is not None:
+                results.append(result.result)
+
+        return results
+
+    @asyncio.coroutine
+    def get_cm_state(self, nsr_id=None):
+        return (yield from self._read_query(XPaths.cm_state(nsr_id), False))
+
+    @asyncio.coroutine
+    def get_nsr_opdatas(self, nsr_id=None):
+        return (yield from self._read_query(XPaths.nsr_opdata(nsr_id), False))
+
+    @asyncio.coroutine
+    def get_nsr_scale_group_instance_opdata(self, nsr_id=None, group_name=None, index=None):
+        return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name, index), False))
+        #return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name), True))
+
+    @asyncio.coroutine
+    def get_nsr_configs(self, nsr_id=None):
+        return (yield from self._read_query(XPaths.nsr_config(nsr_id)))
+
+    @asyncio.coroutine
+    def get_nsr_config_status(self, nsr_id=None):
+        return (yield from self._read_query(XPaths.nsr_config_status(nsr_id)))
+
+    @asyncio.coroutine
+    def get_vnfrs(self, vnfr_id=None):
+        return (yield from self._read_query(XPaths.vnfr(vnfr_id)))
+
+    @asyncio.coroutine
+    def get_vlrs(self, vlr_id=None):
+        return (yield from self._read_query(XPaths.vlr(vlr_id)))
+
+    @asyncio.coroutine
+    def get_nsd_ref_counts(self, nsd_id=None):
+        return (yield from self._read_query(XPaths.nsd_ref_count(nsd_id)))
+
+    @asyncio.coroutine
+    def get_vnfd_ref_counts(self, vnfd_id=None):
+        return (yield from self._read_query(XPaths.vnfd_ref_count(vnfd_id)))
+
+    @asyncio.coroutine
+    def delete_nsr(self, nsr_id):
+        with self.dts.transaction() as xact:
+            yield from self.dts.query_delete(
+                    XPaths.nsr_config(nsr_id),
+                    0
+                    #rwdts.XactFlag.TRACE,
+                    #rwdts.Flag.ADVISE,
+                    )
+
+    @asyncio.coroutine
+    def delete_nsd(self, nsd_id):
+        nsd_xpath = XPaths.nsd(nsd_id)
+        self.log.debug("Attempting to delete NSD with path = %s", nsd_xpath)
+        with self.dts.transaction() as xact:
+            yield from self.dts.query_delete(
+                    nsd_xpath,
+                    rwdts.XactFlag.ADVISE,
+                    )
+
+    @asyncio.coroutine
+    def delete_vnfd(self, vnfd_id):
+        vnfd_xpath = XPaths.vnfd(vnfd_id)
+        self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
+        with self.dts.transaction() as xact:
+            yield from self.dts.query_delete(
+                    vnfd_xpath,
+                    rwdts.XactFlag.ADVISE,
+                    )
+
+    @asyncio.coroutine
+    def update_nsd(self, nsd_id, nsd_msg):
+        nsd_xpath = XPaths.nsd(nsd_id)
+        self.log.debug("Attempting to update NSD with path = %s", nsd_xpath)
+        with self.dts.transaction() as xact:
+            yield from self.dts.query_update(
+                    nsd_xpath,
+                    rwdts.XactFlag.ADVISE,
+                    nsd_msg,
+                    )
+
+    @asyncio.coroutine
+    def update_vnfd(self, vnfd_id, vnfd_msg):
+        vnfd_xpath = XPaths.vnfd(vnfd_id)
+        self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
+        with self.dts.transaction() as xact:
+            yield from self.dts.query_update(
+                    vnfd_xpath,
+                    rwdts.XactFlag.ADVISE,
+                    vnfd_msg,
+                    )
+
+    @asyncio.coroutine
+    def update_nsr_config(self, nsr_id, nsr_msg):
+        nsr_xpath = XPaths.nsr_config(nsr_id)
+        self.log.debug("Attempting to update NSR with path = %s", nsr_xpath)
+        with self.dts.transaction() as xact:
+            yield from self.dts.query_update(
+                    nsr_xpath,
+                    rwdts.XactFlag.ADVISE|rwdts.XactFlag.REPLACE,
+                    nsr_msg,
+                    )
+
+
+class ManoTestCase(rift.test.dts.AbstractDTSTest):
+    """Common assertion/query helpers shared by the MANO DTS test cases.
+
+    All helpers read through `self.querier` (a ManoQuerier created by the
+    concrete test's setup) and assert with unittest methods.  Coroutine
+    helpers are generator-based and must be driven with `yield from`.
+    """
+
+    @asyncio.coroutine
+    def verify_nsr_state(self, nsr_id, state):
+        # Expect exactly one NSR opdata record with the given op-status.
+        nsrs = yield from self.querier.get_nsr_opdatas(nsr_id)
+        self.assertEqual(1, len(nsrs))
+        nsr = nsrs[0]
+
+        self.log.debug("Got nsr = %s", nsr)
+        self.assertEqual(state, nsr.operational_status)
+
+    @asyncio.coroutine
+    def verify_vlr_state(self, vlr_id, state):
+        vlrs = yield from self.querier.get_vlrs(vlr_id)
+        self.assertEqual(1, len(vlrs))
+        vlr = vlrs[0]
+
+        self.assertEqual(state, vlr.operational_status)
+
+    def verify_vdu_state(self, vdu, state):
+        # `vdu` is a vdur record, not an id (no query needed).
+        self.assertEqual(state, vdu.operational_status)
+
+    @asyncio.coroutine
+    def verify_vnf_state(self, vnfr_id, state):
+        vnfrs = yield from self.querier.get_vnfrs(vnfr_id)
+        self.assertEqual(1, len(vnfrs))
+        vnfr = vnfrs[0]
+
+        self.assertEqual(state, vnfr.operational_status)
+
+    @asyncio.coroutine
+    def terminate_nsr(self, nsr_id):
+        # Deleting the NSR config triggers termination of the NS instance.
+        self.log.debug("Terminating nsr id: %s", nsr_id)
+        yield from self.querier.delete_nsr(nsr_id)
+
+    @asyncio.coroutine
+    def verify_nsr_deleted(self, nsr_id):
+        # Both opdata and config for the NSR must be gone.
+        nsr_opdatas = yield from self.querier.get_nsr_opdatas(nsr_id)
+        self.assertEqual(0, len(nsr_opdatas))
+
+        nsr_configs = yield from self.querier.get_nsr_configs(nsr_id)
+        self.assertEqual(0, len(nsr_configs))
+
+    @asyncio.coroutine
+    def verify_num_vlrs(self, num_vlrs):
+        # System-wide VLR count (all NSRs/VNFRs).
+        vlrs = yield from self.querier.get_vlrs()
+        self.assertEqual(num_vlrs, len(vlrs))
+
+    @asyncio.coroutine
+    def get_nsr_vlrs(self, nsr_id):
+        # Ids of the NS-level (external) VLRs referenced by the NSR opdata.
+        nsrs = yield from self.querier.get_nsr_opdatas(nsr_id)
+        return [v.vlr_ref for v in nsrs[0].vlr]
+
+    @asyncio.coroutine
+    def get_nsr_vnfs(self, nsr_id):
+        # Ids of the constituent VNFRs referenced by the NSR opdata.
+        nsrs = yield from self.querier.get_nsr_opdatas(nsr_id)
+        return nsrs[0].constituent_vnfr_ref
+
+    @asyncio.coroutine
+    def get_vnf_vlrs(self, vnfr_id):
+        # Ids of the VNF-internal VLRs referenced by the VNFR.
+        vnfrs = yield from self.querier.get_vnfrs(vnfr_id)
+        return [i.vlr_ref for i in vnfrs[0].internal_vlr]
+
+    @asyncio.coroutine
+    def verify_num_nsr_vlrs(self, nsr_id, num_vlrs):
+        vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        self.assertEqual(num_vlrs, len(vlrs))
+
+    @asyncio.coroutine
+    def verify_num_nsr_vnfrs(self, nsr_id, num_vnfs):
+        vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        self.assertEqual(num_vnfs, len(vnfs))
+
+    @asyncio.coroutine
+    def verify_num_vnfr_vlrs(self, vnfr_id, num_vlrs):
+        vlrs = yield from self.get_vnf_vlrs(vnfr_id)
+        self.assertEqual(num_vlrs, len(vlrs))
+
+    @asyncio.coroutine
+    def get_vnf_vdus(self, vnfr_id):
+        # vdur records (not ids) of the VNFR.
+        vnfrs = yield from self.querier.get_vnfrs(vnfr_id)
+        return [i for i in vnfrs[0].vdur]
+
+    @asyncio.coroutine
+    def verify_num_vnfr_vdus(self, vnfr_id, num_vdus):
+        vdus = yield from self.get_vnf_vdus(vnfr_id)
+        self.assertEqual(num_vdus, len(vdus))
+
+    @asyncio.coroutine
+    def verify_num_vnfrs(self, num_vnfrs):
+        vnfrs = yield from self.querier.get_vnfrs()
+        self.assertEqual(num_vnfrs, len(vnfrs))
+
+    @asyncio.coroutine
+    def verify_nsd_ref_count(self, nsd_id, num_ref):
+        nsd_ref_counts = yield from self.querier.get_nsd_ref_counts(nsd_id)
+        self.assertEqual(num_ref, nsd_ref_counts[0].instance_ref_count)
+
+class DescriptorPublisher(object):
+    """Publishes descriptor objects into DTS and remembers the registrations
+    so they can all be withdrawn at test teardown."""
+
+    def __init__(self, log, loop, dts):
+        self.log = log
+        self.loop = loop
+        self.dts = dts
+
+        # DTS registration handles for everything published so far.
+        self._registrations = []
+
+    @asyncio.coroutine
+    def publish(self, w_path, path, desc):
+        # Register as publisher on the (wildcarded) w_path, then create the
+        # element at the concrete path once the registration is ready.
+        # Blocks until the element has been created; returns the registration.
+        ready_event = asyncio.Event(loop=self.loop)
+
+        @asyncio.coroutine
+        def on_ready(regh, status):
+            self.log.debug("Create element: %s, obj-type:%s obj:%s",
+                           path, type(desc), desc)
+            with self.dts.transaction() as xact:
+                regh.create_element(path, desc, xact.xact)
+            self.log.debug("Created element: %s, obj:%s", path, desc)
+            ready_event.set()
+
+        handler = rift.tasklets.DTS.RegistrationHandler(
+                on_ready=on_ready
+                )
+
+        self.log.debug("Registering path: %s, obj:%s", w_path, desc)
+        reg = yield from self.dts.register(
+                w_path,
+                handler,
+                flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
+                )
+        self._registrations.append(reg)
+        self.log.debug("Registered path : %s", w_path)
+        yield from ready_event.wait()
+
+        return reg
+
+    def unpublish_all(self):
+        # Withdraw every descriptor published through this instance.
+        self.log.debug("Deregistering all published descriptors")
+        for reg in self._registrations:
+            reg.deregister()
+
+
+class PingPongNsrConfigPublisher(object):
+    XPATH = "C,/nsr:ns-instance-config"
+
+    def __init__(self, log, loop, dts, ping_pong, cloud_account_name):
+        """Build an NS instance-config for the ping-pong NSD and kick off
+        DTS publisher registration.
+
+        Arguments:
+            log                - logger instance
+            loop               - asyncio event loop
+            dts                - DTS handle used for registration/publishing
+            ping_pong          - PingPongDescriptorPublisher supplying the NSD
+            cloud_account_name - cloud account assigned to the NSR
+        """
+        self.dts = dts
+        self.log = log
+        self.loop = loop
+        self.ref = None
+
+        self.querier = ManoQuerier(log, dts)
+
+        self.nsr_config = rwnsryang.YangData_Nsr_NsInstanceConfig()
+
+        # Build a single NSR that embeds a copy of the ping-pong NSD.
+        nsr = rwnsryang.YangData_Nsr_NsInstanceConfig_Nsr()
+        nsr.id = str(uuid.uuid4())
+        nsr.name = "ns1.{}".format(nsr.id)
+        nsr.nsd = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_Nsd()
+        nsr.nsd.from_dict(ping_pong.ping_pong_nsd.nsd.as_dict())
+        nsr.cloud_account = cloud_account_name
+
+        # Map the first constituent VNF to the RiftCA config agent.
+        nsr.vnf_cloud_account_map.add().from_dict({
+            'member_vnf_index_ref': nsr.nsd.constituent_vnfd[0].member_vnf_index,
+            'config_agent_account': 'RiftCA',
+            #'cloud_account':'mock_account1'
+        })
+
+        # Input parameter that overrides the NSD name at instantiation time;
+        # verify_nsr_config later asserts this exact xpath round-trips.
+        inputs = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
+        inputs.xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(ping_pong.nsd_id)
+        inputs.value = "inigo montoya"
+
+        # NSD-level placement groups mapped to openstack host aggregates.
+        fast_cpu = {'metadata_key': 'FASTCPU', 'metadata_value': 'True'}
+        self.create_nsd_placement_group_map(nsr,
+                                            group_name      = 'Orcus',
+                                            cloud_type      = 'openstack',
+                                            construct_type  = 'host_aggregate',
+                                            construct_value = [fast_cpu])
+
+        fast_storage = {'metadata_key': 'FASTSSD', 'metadata_value': 'True'}
+        self.create_nsd_placement_group_map(nsr,
+                                            group_name      = 'Quaoar',
+                                            cloud_type      = 'openstack',
+                                            construct_type  = 'host_aggregate',
+                                            construct_value = [fast_storage])
+
+        # VNFD-level placement groups, one per ping/pong VNFD.
+        fast_cpu = {'metadata_key': 'BLUE_HW', 'metadata_value': 'True'}
+        self.create_vnfd_placement_group_map(nsr,
+                                             group_name      = 'Eris',
+                                             vnfd_id         = ping_pong.ping_vnfd_id,
+                                             cloud_type      = 'openstack',
+                                             construct_type  = 'host_aggregate',
+                                             construct_value = [fast_cpu])
+
+        fast_storage = {'metadata_key': 'YELLOW_HW', 'metadata_value': 'True'}
+        self.create_vnfd_placement_group_map(nsr,
+                                             group_name      = 'Weywot',
+                                             vnfd_id         = ping_pong.pong_vnfd_id,
+                                             cloud_type      = 'openstack',
+                                             construct_type  = 'host_aggregate',
+                                             construct_value = [fast_storage])
+
+
+        nsr.input_parameter.append(inputs)
+
+        self._nsr = nsr
+        self.nsr_config.nsr.append(nsr)
+
+        # Registration completes asynchronously; publish() waits on this event.
+        self._ready_event = asyncio.Event(loop=self.loop)
+        asyncio.ensure_future(self.register(), loop=loop)
+
+    @asyncio.coroutine
+    def register(self):
+        """Register this object as the DTS publisher for the NSR config
+        xpath; sets ``_ready_event`` once the registration is ready."""
+        @asyncio.coroutine
+        def on_ready(regh, status):
+            # Unblock publish(), which waits for the registration to be live.
+            self._ready_event.set()
+
+        self.log.debug("Registering path: %s", PingPongNsrConfigPublisher.XPATH)
+        self.reg = yield from self.dts.register(
+                PingPongNsrConfigPublisher.XPATH,
+                flags=rwdts.Flag.PUBLISHER,
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_ready=on_ready,
+                    ),
+                )
+
+    @asyncio.coroutine
+    def publish(self):
+        """Publish the prepared NSR config element via DTS.
+
+        Blocks until the publisher registration is ready, then creates the
+        element inside a DTS transaction.
+
+        Returns:
+            The id of the published NSR.
+        """
+        self.log.debug("Publishing NSR: {}".format(self.nsr_config))
+        yield from self._ready_event.wait()
+        with self.dts.transaction() as xact:
+            self.reg.create_element(
+                    PingPongNsrConfigPublisher.XPATH,
+                    self.nsr_config,
+                    xact=xact.xact,
+                    )
+
+        return self._nsr.id
+
+    @asyncio.coroutine
+    def create_scale_group_instance(self, group_name, index):
+        index = 1
+        scaling_group = self.nsr_config.nsr[0].scaling_group.add()
+        scaling_group.from_dict({
+            "scaling_group_name_ref": group_name,
+            "instance": [{"index": index}],
+            })
+        with self.dts.transaction() as xact:
+            self.reg.update_element(
+                    PingPongNsrConfigPublisher.XPATH,
+                    self.nsr_config,
+                    xact=xact.xact,
+                    )
+
+        return index
+
+    def create_nsd_placement_group_map(self,
+                                       nsr,
+                                       group_name,
+                                       cloud_type,
+                                       construct_type,
+                                       construct_value):
+        """Append an NSD placement-group map entry to ``nsr``.
+
+        Arguments:
+            nsr             - NSR config object to mutate
+            group_name      - NSD placement group being mapped
+            cloud_type      - e.g. 'openstack'
+            construct_type  - cloud construct key, e.g. 'host_aggregate';
+                              used directly as the dict key below
+            construct_value - value list for that construct key
+        """
+        placement_group  = nsr.nsd_placement_group_maps.add()
+        placement_group.from_dict({
+            "placement_group_ref" : group_name,
+            "cloud_type"          : cloud_type,
+            construct_type        : construct_value,
+            })
+        
+
+    def create_vnfd_placement_group_map(self,
+                                        nsr,
+                                        group_name,
+                                        vnfd_id,
+                                        cloud_type,
+                                        construct_type,
+                                        construct_value):
+        """Append a VNFD placement-group map entry to ``nsr``.
+
+        Arguments:
+            nsr             - NSR config object to mutate
+            group_name      - VNFD placement group being mapped
+            vnfd_id         - id of the VNFD the group belongs to
+            cloud_type      - e.g. 'openstack'
+            construct_type  - cloud construct key, e.g. 'host_aggregate';
+                              used directly as the dict key below
+            construct_value - value list for that construct key
+        """
+        placement_group  = nsr.vnfd_placement_group_maps.add()
+        placement_group.from_dict({
+            "placement_group_ref"  : group_name,
+            "vnfd_id_ref"          : vnfd_id,
+            "cloud_type"           : cloud_type,
+            construct_type         : construct_value,
+            })
+        
+    
+    @asyncio.coroutine
+    def delete_scale_group_instance(self, group_name, index):
+        """Delete one scaling-group instance via a DTS ADVISE delete on its
+        config xpath.
+
+        Arguments:
+            group_name - scaling group name reference
+            index      - instance index to delete
+
+        Note: the local nsr_config copy is NOT updated here (see the
+        commented-out alternatives below, kept from the original author).
+        """
+        self.log.debug("Deleting scale group %s instance %s", group_name, index)
+        #del self.nsr_config.nsr[0].scaling_group[0].instance[0]
+        xpath = XPaths.nsr_scale_group_instance_config(self.nsr_config.nsr[0].id, group_name, index)
+        yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
+        #with self.dts.transaction() as xact:
+        #    self.reg.update_element(
+        #            PingPongNsrConfigPublisher.XPATH,
+        #            self.nsr_config,
+        #            flags=rwdts.XactFlag.REPLACE,
+        #            xact=xact.xact,
+        #            )
+
+    def deregister(self):
+        """Deregister the DTS publisher registration, if one was created."""
+        if self.reg is not None:
+            self.reg.deregister()
+
+    def create_nsr_vl(self):
+        """Add an extra VLD ('ping_pong_vld_2') to the NSR's embedded NSD and
+        map it to the 'mock_account' cloud account.
+
+        Only mutates the local nsr_config; callers publish via
+        add_nsr_vl()/update_nsr_config.
+        """
+        vld = self.nsr_config.nsr[0].nsd.vld.add()
+        vld.id = 'ping_pong_vld_2'
+        vld.name = 'ping_pong_vld_2'  # hard coded
+        vld.short_name = vld.name
+        vld.vendor = 'RIFT.io'
+        vld.description = 'Toy VL'
+        vld.version = '1.0'
+        vld.type_yang = 'ELAN'
+
+        # cpref = vld.vnfd_connection_point_ref.add()
+        # cpref.member_vnf_index_ref = cp[0]
+        # cpref.vnfd_id_ref = cp[1]
+        # cpref.vnfd_connection_point_ref = cp[2]
+
+        # NOTE: 'vld' is reused here for a different object -- this is the
+        # VL->cloud-account map entry, not the VLD above.
+        vld = self.nsr_config.nsr[0].vl_cloud_account_map.add()
+        vld.vld_id_ref = 'ping_pong_vld_2'
+        vld.cloud_accounts = ["mock_account"]
+
+    @asyncio.coroutine
+    def add_nsr_vl(self):
+        """Add the extra VL to the local config and push the updated NSR
+        config through the querier."""
+        self.create_nsr_vl()
+        yield from self.querier.update_nsr_config(
+            self.nsr_config.nsr[0].id,
+            self.nsr_config.nsr[0],
+        )
+
+    @asyncio.coroutine
+    def del_nsr_vl(self):
+        """Remove the extra VL ('ping_pong_vld_2') from the local config and
+        push the updated NSR config through the querier."""
+        for vld in self.nsr_config.nsr[0].nsd.vld:
+            if vld.id == 'ping_pong_vld_2':
+                # break immediately after removal -- safe despite mutating
+                # the list being iterated.
+                self.nsr_config.nsr[0].nsd.vld.remove(vld)
+                break
+
+        yield from self.querier.update_nsr_config(
+            self.nsr_config.nsr[0].id,
+            self.nsr_config.nsr[0],
+        )
+
+    def update_vnf_cloud_map(self,vnf_cloud_map):
+        """Set per-VNF cloud accounts on the local NSR config.
+
+        Arguments:
+            vnf_cloud_map - dict mapping member_vnf_index -> cloud account name
+
+        Existing map entries for an index are updated in place; otherwise a
+        new entry is added. Local mutation only -- does not publish.
+        """
+        self.log.debug("Modifying NSR to add VNF cloud account map: {}".format(vnf_cloud_map))
+        for vnf_index,cloud_acct  in vnf_cloud_map.items():
+            vnf_maps = [vnf_map for vnf_map in self.nsr_config.nsr[0].vnf_cloud_account_map if vnf_index == vnf_map.member_vnf_index_ref]
+            if vnf_maps:
+                vnf_maps[0].cloud_account = cloud_acct
+            else: 
+                self.nsr_config.nsr[0].vnf_cloud_account_map.add().from_dict({
+                    'member_vnf_index_ref':vnf_index,
+                    'cloud_account':cloud_acct
+                    })
+
+
+class PingPongDescriptorPublisher(object):
+    """Generates the ping/pong VNFDs and the ping-pong NSD and publishes,
+    updates and deletes them through DTS for the MANO unit tests."""
+
+    def __init__(self, log, loop, dts, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
+        """Generate the ping-pong descriptors.
+
+        Arguments:
+            num_external_vlrs - external VLR count passed to the generator
+            num_internal_vlrs - internal VLR count passed to the generator
+            num_ping_vms      - NOTE(review): currently unused; the generator
+                                is called with num_vnf_vms=2 -- confirm intent.
+        """
+        self.log = log
+        self.loop = loop
+        self.dts = dts
+
+        self.querier = ManoQuerier(self.log, self.dts)
+        self.publisher = DescriptorPublisher(self.log, self.loop, self.dts)
+        self.ping_vnfd, self.pong_vnfd, self.ping_pong_nsd = \
+                ping_pong_nsd.generate_ping_pong_descriptors(
+                        pingcount=1,
+                        external_vlr_count=num_external_vlrs,
+                        internal_vlr_count=num_internal_vlrs,
+                        num_vnf_vms=2,
+                        mano_ut=True,
+                        use_scale_group=True,
+                        use_mon_params=False,
+                        )
+
+        # Directory where the NSD's config artifacts live under RIFT_ARTIFACTS.
+        self.config_dir = os.path.join(os.getenv('RIFT_ARTIFACTS'),
+                                       "launchpad/libs",
+                                       self.ping_pong_nsd.id,
+                                       "config")
+
+    @property
+    def nsd_id(self):
+        # Id of the generated ping-pong NSD.
+        return self.ping_pong_nsd.id
+
+    @property
+    def ping_vnfd_id(self):
+        # Id of the generated ping VNFD.
+        return self.ping_vnfd.id
+
+    @property
+    def pong_vnfd_id(self):
+        # Id of the generated pong VNFD.
+        return self.pong_vnfd.id
+
+    @asyncio.coroutine
+    def publish_desciptors(self):
+        """Publish ping VNFD, pong VNFD and the ping-pong NSD via DTS.
+
+        NOTE: method name typo ('desciptors') is kept -- renaming would break
+        existing callers.
+        """
+        # Publish ping_vnfd
+        xpath = XPaths.vnfd(self.ping_vnfd_id)
+        xpath_wild = XPaths.vnfd()
+        for obj in self.ping_vnfd.descriptor.vnfd:
+            self.log.debug("Publishing ping_vnfd path: %s - %s, type:%s, obj:%s",
+                           xpath, xpath_wild, type(obj), obj)
+            yield from self.publisher.publish(xpath_wild, xpath, obj)
+
+        # Publish pong_vnfd
+        xpath = XPaths.vnfd(self.pong_vnfd_id)
+        xpath_wild = XPaths.vnfd()
+        for obj in self.pong_vnfd.descriptor.vnfd:
+            self.log.debug("Publishing pong_vnfd path: %s, wild_path: %s, obj:%s",
+                           xpath, xpath_wild, obj)
+            yield from self.publisher.publish(xpath_wild, xpath, obj)
+
+        # Publish ping_pong_nsd
+        xpath = XPaths.nsd(self.nsd_id)
+        xpath_wild = XPaths.nsd()
+        for obj in self.ping_pong_nsd.descriptor.nsd:
+            self.log.debug("Publishing ping_pong nsd path: %s, wild_path: %s, obj:%s",
+                           xpath, xpath_wild, obj)
+            yield from self.publisher.publish(xpath_wild, xpath, obj)
+
+        self.log.debug("DONE - publish_desciptors")
+
+    def unpublish_descriptors(self):
+        """Unpublish everything previously published by this object."""
+        self.publisher.unpublish_all()
+
+    @asyncio.coroutine
+    def delete_nsd(self):
+        """Delete the published ping-pong NSD."""
+        yield from self.querier.delete_nsd(self.ping_pong_nsd.id)
+
+    @asyncio.coroutine
+    def delete_ping_vnfd(self):
+        """Delete the published ping VNFD."""
+        yield from self.querier.delete_vnfd(self.ping_vnfd.id)
+
+    @asyncio.coroutine
+    def update_nsd(self):
+        """Re-send the NSD as an update."""
+        yield from self.querier.update_nsd(
+                self.ping_pong_nsd.id,
+                self.ping_pong_nsd.descriptor.nsd[0]
+                )
+
+    @asyncio.coroutine
+    def update_ping_vnfd(self):
+        """Re-send the ping VNFD as an update."""
+        yield from self.querier.update_vnfd(
+                self.ping_vnfd.id,
+                self.ping_vnfd.descriptor.vnfd[0]
+                )
+
+
+
+
+class ManoTestCase(rift.test.dts.AbstractDTSTest):
+    """
+    DTS GI interface unittests
+
+    Note:  Each tests uses a list of asyncio.Events for staging through the
+    test.  These are required here because we are bring up each coroutine
+    ("tasklet") at the same time and are not implementing any re-try
+    mechanisms.  For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries.  Such
+    event lists should not be used in production software.
+    """
+
+    @classmethod
+    def configure_suite(cls, rwmain):
+        """Add the VNS/VNFM/NSM/resmgr/conman tasklets to the test rwmain.
+
+        Plugin directories come from environment variables (defaulted by
+        main() when missing).
+        """
+        vns_dir = os.environ.get('VNS_DIR')
+        vnfm_dir = os.environ.get('VNFM_DIR')
+        nsm_dir = os.environ.get('NSM_DIR')
+        rm_dir = os.environ.get('RM_DIR')
+
+        rwmain.add_tasklet(vns_dir, 'rwvnstasklet')
+        rwmain.add_tasklet(vnfm_dir, 'rwvnfmtasklet')
+        rwmain.add_tasklet(nsm_dir, 'rwnsmtasklet')
+        # NOTE(review): both tasklets below load from rm_dir -- confirm that
+        # rwconmantasklet really lives in the RM_DIR plugin directory.
+        rwmain.add_tasklet(rm_dir, 'rwresmgrtasklet')
+        rwmain.add_tasklet(rm_dir, 'rwconmantasklet')
+
+    @classmethod
+    def configure_schema(cls):
+        """Return the rw-nsm yang schema used by the DTS test harness."""
+        return rwnsmyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        """Overall test timeout in seconds (4 minutes)."""
+        return 240
+
+    @staticmethod
+    def get_cal_account(account_type, account_name):
+        """Build a CloudAccount config object for the requested account type.
+
+        Arguments:
+            account_type - 'mock', 'openstack_static' or 'openstack_dynamic'
+                           (any other value yields an unpopulated account)
+            account_name - name to assign to the account
+
+        Returns:
+            An rwcloudyang.CloudAccount instance. Openstack variants are
+            filled from the module-level ``openstack_info`` dict.
+        """
+        account = rwcloudyang.CloudAccount()
+        if account_type == 'mock':
+            account.name          = account_name
+            account.account_type  = "mock"
+            account.mock.username = "mock_user"
+        elif ((account_type == 'openstack_static') or (account_type == 'openstack_dynamic')):
+            account.name = account_name
+            account.account_type = 'openstack'
+            account.openstack.key = openstack_info['username']
+            account.openstack.secret       = openstack_info['password']
+            account.openstack.auth_url     = openstack_info['auth_url']
+            account.openstack.tenant       = openstack_info['project_name']
+            account.openstack.mgmt_network = openstack_info['mgmt_network']
+        return account
+
+    @asyncio.coroutine
+    def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"):
+        """Create a cloud account config element via a DTS ADVISE create.
+
+        Arguments:
+            dts        - DTS handle to issue the query on
+            cloud_type - account type passed to get_cal_account()
+            cloud_name - name of the account (default 'cloud1')
+        """
+        account = self.get_cal_account(cloud_type, cloud_name)
+        account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name)
+        self.log.info("Configuring cloud-account: %s", account)
+        yield from dts.query_create(account_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    account)
+
+    @asyncio.coroutine
+    def wait_tasklets(self):
+        # Fixed 5s grace period for tasklets to come up; no readiness probe.
+        yield from asyncio.sleep(5, loop=self.loop)
+
+    def configure_test(self, loop, test_id):
+        """Per-test setup: create DTS handle, descriptor publisher, querier
+        and the NSR config publisher bound to the 'mock_account' cloud."""
+        self.log.debug("STARTING - %s", self.id())
+        self.tinfo = self.new_tinfo(self.id())
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+        self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop, self.dts)
+        self.querier = ManoQuerier(self.log, self.dts)
+        self.nsr_publisher = PingPongNsrConfigPublisher(
+                self.log,
+                loop,
+                self.dts,
+                self.ping_pong,
+                "mock_account",
+                )
+
+    def test_create_nsr_record(self):
+
+        @asyncio.coroutine
+        def verify_cm_state(termination=False, nsrid=None):
+            """Poll the config-manager state for nsrid.
+
+            termination=False: pass once cm-state reaches 'ready'.
+            termination=True : pass once the cm-state record is gone.
+            Up to 10 polls, 10s apart; fails the test if neither happens.
+            """
+            self.log.debug("Verifying cm_state path = %s", XPaths.cm_state(nsrid))
+            #print("###>>> Verifying cm_state path:", XPaths.cm_state(nsrid))
+
+            loop_count = 10
+            loop_sleep = 10
+            while loop_count:
+                yield from asyncio.sleep(loop_sleep, loop=self.loop)
+                loop_count -= 1
+                cm_nsr = None
+                cm_nsr_i = yield from self.querier.get_cm_state(nsr_id=nsrid)
+                if (cm_nsr_i is not None and len(cm_nsr_i) != 0):
+                    self.assertEqual(1, len(cm_nsr_i))
+                    cm_nsr = cm_nsr_i[0].as_dict()
+                    #print("###>>> cm_nsr=", cm_nsr)
+                if termination:
+                    if len(cm_nsr_i) == 0:
+                        print("\n###>>> cm-state NSR deleted OK <<<###\n")
+                        return
+                elif (cm_nsr is not None and
+                    'state' in cm_nsr and
+                    (cm_nsr['state'] == 'ready')):
+                    self.log.debug("Got cm_nsr record %s", cm_nsr)
+                    print("\n###>>> cm-state NSR 'ready' OK <<<###\n")
+                    return
+
+                # if (len(cm_nsr_i) == 1 and cm_nsr_i[0].state == 'ready'):
+                #     self.log.debug("Got cm_nsr record %s", cm_nsr)
+                # else:
+                #     yield from asyncio.sleep(10, loop=self.loop)
+
+            print("###>>> Failed cm-state, termination:", termination)
+            # loop_count is 0 here, so this assertion always fails on timeout.
+            self.assertEqual(1, loop_count)
+
+        @asyncio.coroutine
+        def verify_nsr_opdata(termination=False):
+            """Poll NSR operational data.
+
+            termination=False: pass when the NSR reaches 'running' and its
+            config_status is 'configuring'.
+            termination=True : pass when no NSR opdata records remain.
+            """
+            self.log.debug("Verifying nsr opdata path = %s", XPaths.nsr_opdata())
+
+            while True:
+                nsrs = yield from self.querier.get_nsr_opdatas()
+                if termination:
+                    if len(nsrs) != 0:
+                        # Retry up to 10 times; the for-else asserts if records
+                        # never disappear.
+                        # NOTE(review): there is no sleep between these
+                        # retries -- TODO confirm whether one is wanted.
+                        for i in range(10):
+                            nsrs = yield from self.querier.get_nsr_opdatas()
+                            if len(nsrs) == 0:
+                                self.log.debug("No active NSR records found. NSR termination successful")
+                                return
+                        else:
+                            self.assertEqual(0, len(nsrs))
+                            self.log.error("Active NSR records found. NSR termination failed")
+
+                    else:
+                        self.log.debug("No active NSR records found. NSR termination successful")
+                        self.assertEqual(0, len(nsrs))
+                        return
+
+                nsr = nsrs[0]
+                self.log.debug("Got nsr record %s", nsr)
+                if nsr.operational_status == 'running':
+                    self.log.debug("!!! Rcvd NSR with running status !!!")
+                    self.assertEqual("configuring", nsr.config_status)
+                    break
+
+                self.log.debug("Rcvd NSR with %s status", nsr.operational_status)
+                self.log.debug("Sleeping for 10 seconds")
+                yield from asyncio.sleep(10, loop=self.loop)
+
+        @asyncio.coroutine
+        def verify_nsr_config(termination=False):
+            """Check exactly one NSR config exists and its input parameter
+            xpath matches what PingPongNsrConfigPublisher set."""
+            self.log.debug("Verifying nsr config path = %s", XPaths.nsr_config())
+
+            nsr_configs = yield from self.querier.get_nsr_configs()
+            self.assertEqual(1, len(nsr_configs))
+
+            nsr_config = nsr_configs[0]
+            self.assertEqual(
+                    "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(self.ping_pong.nsd_id),
+                    nsr_config.input_parameter[0].xpath,
+                    )
+
+        @asyncio.coroutine
+        def verify_nsr_config_status(termination=False, nsrid=None):
+            """Poll NSR opdata until config_status becomes 'configured'.
+
+            No-op when termination is True or nsrid is None. Up to 6 polls,
+            10s apart; fails the test otherwise.
+            """
+            if termination is False and nsrid is not None:
+                self.log.debug("Verifying nsr config status path = %s", XPaths.nsr_opdata(nsrid))
+
+                loop_count = 6
+                loop_sleep = 10
+                while loop_count:
+                    loop_count -= 1
+                    yield from asyncio.sleep(loop_sleep, loop=self.loop)
+                    nsr_opdata_l = yield from self.querier.get_nsr_opdatas(nsrid)
+                    self.assertEqual(1, len(nsr_opdata_l))
+                    nsr_opdata = nsr_opdata_l[0].as_dict()
+                    if ("configured" == nsr_opdata['config_status']):
+                        print("\n###>>> NSR Config Status 'configured' OK <<<###\n")
+                        return
+                # Timed out: assert with the last observed status for the report.
+                self.assertEqual("configured", nsr_opdata['config_status'])
+
+        @asyncio.coroutine
+        def verify_vnfr_record(termination=False):
+            """Poll VNFR records.
+
+            termination=True : pass once no VNFRs remain (10 retries, 0.5s apart).
+            termination=False: poll every 10s until the first VNFR reaches
+            'running' (True) or 'failed' (False).
+            """
+            self.log.debug("Verifying vnfr record path = %s, Termination=%d",
+                           XPaths.vnfr(), termination)
+            if termination:
+                for i in range(10):
+                    vnfrs = yield from self.querier.get_vnfrs()
+                    if len(vnfrs) == 0:
+                        return True
+
+                    for vnfr in vnfrs:
+                        self.log.debug("VNFR still exists = %s", vnfr)
+
+                    yield from asyncio.sleep(.5, loop=self.loop)
+
+
+                assert len(vnfrs) == 0
+
+            while True:
+                vnfrs = yield from self.querier.get_vnfrs()
+                if len(vnfrs) != 0 and termination is False:
+                    vnfr = vnfrs[0]
+                    self.log.debug("Rcvd VNFR with %s status", vnfr.operational_status)
+                    if vnfr.operational_status == 'running':
+                        self.log.debug("!!! Rcvd VNFR with running status !!!")
+                        return True
+
+                    elif vnfr.operational_status == "failed":
+                        self.log.debug("!!! Rcvd VNFR with failed status !!!")
+                        return False
+
+                self.log.debug("Sleeping for 10 seconds")
+                yield from asyncio.sleep(10, loop=self.loop)
+
+
+        @asyncio.coroutine
+        def verify_vnfr_cloud_account(vnf_index, cloud_account):
+            """Assert the VNFR with the given member index uses cloud_account.
+
+            NOTE(review): raises IndexError (not an assert failure) when no
+            VNFR matches vnf_index.
+            """
+            self.log.debug("Verifying vnfr record Cloud account for vnf index = %d is %s", vnf_index,cloud_account)
+            vnfrs = yield from self.querier.get_vnfrs()
+            cloud_accounts = [vnfr.cloud_account for vnfr in vnfrs if vnfr.member_vnf_index_ref == vnf_index]
+            self.log.debug("VNFR cloud account for index %d is %s", vnf_index,cloud_accounts[0])
+            assert cloud_accounts[0] == cloud_account
+
+        @asyncio.coroutine
+        def verify_vlr_record(termination=False):
+            """Read all VLR records once; when terminating, assert each read
+            yields no result."""
+            vlr_xpath = XPaths.vlr()
+            self.log.debug("Verifying vlr record path = %s, termination: %s",
+                           vlr_xpath, termination)
+            res_iter = yield from self.dts.query_read(vlr_xpath)
+
+            for i in res_iter:
+                result = yield from i
+                if termination:
+                    self.assertIsNone(result)
+
+                self.log.debug("Got vlr record %s", result)
+
+        @asyncio.coroutine
+        def verify_vlrs(nsr_id, count=0):
+            """Poll every 10s until the NSR is 'running' AND carries exactly
+            ``count`` VLRs."""
+            while True:
+                nsrs = yield from self.querier.get_nsr_opdatas()
+                nsr = nsrs[0]
+                self.log.debug("Got nsr record %s", nsr)
+                if nsr.operational_status == 'running':
+                    self.log.debug("!!! Rcvd NSR with running status !!!")
+                    # Check the VLR count
+                    if (len(nsr.vlr)) == count:
+                        self.log.debug("NSR %s has %d VLRs", nsr_id, count)
+                        break
+
+                self.log.debug("Rcvd NSR %s with %s status", nsr_id, nsr.operational_status)
+                self.log.debug("Sleeping for 10 seconds")
+                yield from asyncio.sleep(10, loop=self.loop)
+
+        @asyncio.coroutine
+        def verify_nsd_ref_count(termination):
+            # Logs NSD ref-count records; makes no assertion about their values.
+            self.log.debug("Verifying nsd ref count= %s", XPaths.nsd_ref_count())
+            res_iter = yield from self.dts.query_read(XPaths.nsd_ref_count())
+
+            for i in res_iter:
+                result = yield from i
+                self.log.debug("Got nsd ref count record %s", result)
+
+        @asyncio.coroutine
+        def verify_vnfd_ref_count(termination):
+            # Logs VNFD ref-count records; makes no assertion about their values.
+            self.log.debug("Verifying vnfd ref count= %s", XPaths.vnfd_ref_count())
+            res_iter = yield from self.dts.query_read(XPaths.vnfd_ref_count())
+
+            for i in res_iter:
+                result = yield from i
+                self.log.debug("Got vnfd ref count record %s", result)
+
+        @asyncio.coroutine
+        def verify_scale_group_reaches_state(nsr_id, scale_group, index, state, timeout=1000):
+            """Poll (1s interval) until the first scaling-group instance of
+            nsr_id reaches ``state``; fail after ``timeout`` seconds.
+
+            NOTE(review): only scaling_group_record[0].instance[0] is checked;
+            the ``scale_group`` argument itself is unused -- confirm intent.
+            """
+            start_time = time.time()
+            instance_state = None
+            while (time.time() - start_time) < timeout:
+                results = yield from self.querier.get_nsr_opdatas(nsr_id=nsr_id)
+                if len(results) == 1:
+                    result = results[0]
+                    if len(result.scaling_group_record) == 0:
+                        continue
+
+                    if len(result.scaling_group_record[0].instance) == 0:
+                        continue
+
+                    instance = result.scaling_group_record[0].instance[0]
+                    self.assertEqual(instance.scaling_group_index_ref, index)
+
+                    instance_state = instance.op_status
+                    if instance_state == state:
+                        self.log.debug("Scale group instance reached %s state", state)
+                        return
+
+                yield from asyncio.sleep(1, loop=self.loop)
+
+            self.assertEqual(state, instance_state)
+
+        @asyncio.coroutine
+        def verify_results(termination=False, nsrid=None):
+            """Run the full verification suite (VNFR, NSR opdata/config,
+            ref counts, config-manager state) for instantiation or teardown."""
+            yield from verify_vnfr_record(termination)
+            #yield from verify_vlr_record(termination)
+            yield from verify_nsr_opdata(termination)
+            yield from verify_nsr_config(termination)
+            yield from verify_nsd_ref_count(termination)
+            yield from verify_vnfd_ref_count(termination)
+
+            # Config Manager
+            yield from verify_cm_state(termination, nsrid)
+            yield from verify_nsr_config_status(termination, nsrid)
+
+        @asyncio.coroutine
+        def verify_scale_instance(index):
+            self.log.debug("Verifying scale record path = %s, Termination=%d",
+                           XPaths.vnfr(), termination)
+            if termination:
+                for i in range(5):
+                    vnfrs = yield from self.querier.get_vnfrs()
+                    if len(vnfrs) == 0:
+                        return True
+
+                    for vnfr in vnfrs:
+                        self.log.debug("VNFR still exists = %s", vnfr)
+
+
+                assert len(vnfrs) == 0
+
+            while True:
+                vnfrs = yield from self.querier.get_vnfrs()
+                if len(vnfrs) != 0 and termination is False:
+                    vnfr = vnfrs[0]
+                    self.log.debug("Rcvd VNFR with %s status", vnfr.operational_status)
+                    if vnfr.operational_status == 'running':
+                        self.log.debug("!!! Rcvd VNFR with running status !!!")
+                        return True
+
+                    elif vnfr.operational_status == "failed":
+                        self.log.debug("!!! Rcvd VNFR with failed status !!!")
+                        return False
+
+                self.log.debug("Sleeping for 10 seconds")
+                yield from asyncio.sleep(10, loop=self.loop)
+
+        @asyncio.coroutine
+        def terminate_ns(nsr_id):
+            """Terminate the NS by deleting its config element (ADVISE)."""
+            xpath = XPaths.nsr_config(nsr_id)
+            self.log.debug("Terminating network service with path %s", xpath)
+            yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
+            self.log.debug("Terminated network service with path %s", xpath)
+
+        @asyncio.coroutine
+        def run_test():
+            """End-to-end scenario: descriptor lifecycle checks, NS
+            instantiation/termination, VL add/remove, and a multi-site NS."""
+            yield from self.wait_tasklets()
+
+
+            cloud_type = "mock"
+            yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account")
+            yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account1")
+
+            yield from self.ping_pong.publish_desciptors()
+
+            # Attempt deleting VNFD not in use
+            yield from self.ping_pong.update_ping_vnfd()
+
+            # Attempt updating NSD not in use
+            yield from self.ping_pong.update_nsd()
+
+            # Attempt deleting VNFD not in use
+            yield from self.ping_pong.delete_ping_vnfd()
+
+            # Attempt deleting NSD not in use
+            yield from self.ping_pong.delete_nsd()
+
+            # Re-publish after the deletes above so the NSR can instantiate.
+            yield from self.ping_pong.publish_desciptors()
+
+            nsr_id = yield from self.nsr_publisher.publish()
+
+            yield from verify_results(nsrid=nsr_id)
+
+            # yield from self.nsr_publisher.create_scale_group_instance("ping_group", 1)
+
+            # yield from verify_scale_group_reaches_state(nsr_id, "ping_group", 1, "running")
+
+            # yield from self.nsr_publisher.delete_scale_group_instance("ping_group", 1)
+
+            yield from asyncio.sleep(10, loop=self.loop)
+
+            # Attempt deleting VNFD in use
+            yield from self.ping_pong.delete_ping_vnfd()
+
+            # Attempt updating NSD in use
+            yield from self.ping_pong.update_nsd()
+
+            # Update NSD in use with new VL
+            yield from self.nsr_publisher.add_nsr_vl()
+
+            # Verify the new VL has been added
+            yield from verify_vlrs(nsr_id, count=2)
+
+            # Delete the added VL
+            yield from self.nsr_publisher.del_nsr_vl()
+
+            # Verify the new VL has been added
+            yield from verify_vlrs(nsr_id, count=1)
+
+            # Attempt deleting NSD in use
+            yield from self.ping_pong.delete_nsd()
+
+            yield from terminate_ns(nsr_id)
+
+            # Allow teardown to propagate before verifying deletion.
+            yield from asyncio.sleep(25, loop=self.loop)
+            self.log.debug("Verifying termination results")
+            yield from verify_results(termination=True, nsrid=nsr_id)
+            self.log.debug("Verified termination results")
+
+            # Multi site NS case
+            self.log.debug("Testing multi site NS")
+            self.nsr_publisher.update_vnf_cloud_map({1:"mock_account1",2:"mock_account"})
+            nsr_id = yield from self.nsr_publisher.publish()
+
+            yield from verify_results(nsrid=nsr_id)
+            yield from verify_vnfr_cloud_account(1,"mock_account1")
+            yield from verify_vnfr_cloud_account(2,"mock_account")
+            yield from verify_vlrs(nsr_id, count=2)
+
+            yield from terminate_ns(nsr_id)
+
+            yield from asyncio.sleep(25, loop=self.loop)
+            self.log.debug("Verifying termination results for multi site NS")
+            yield from verify_results(termination=True, nsrid=nsr_id)
+            self.log.debug("Verified termination results for multi site NS")
+
+            self.log.debug("Attempting to delete VNFD for real")
+            yield from self.ping_pong.delete_ping_vnfd()
+
+            self.log.debug("Attempting to delete NSD for real")
+            yield from self.ping_pong.delete_nsd()
+
+        # Drive run_test() to completion on the test loop and re-raise any
+        # exception so the test fails visibly.
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+
+def main():
+    """Entry point: default the tasklet plugin-directory environment
+    variables, then run the suite under an XML test runner (or none with
+    --no-runner)."""
+    plugin_dir = os.path.join(os.environ["RIFT_INSTALL"], "usr/lib/rift/plugins")
+    if 'VNS_DIR' not in os.environ:
+        os.environ['VNS_DIR'] = os.path.join(plugin_dir, 'rwvns')
+
+    if 'VNFM_DIR' not in os.environ:
+        os.environ['VNFM_DIR'] = os.path.join(plugin_dir, 'rwvnfm')
+
+    if 'NSM_DIR' not in os.environ:
+        os.environ['NSM_DIR'] = os.path.join(plugin_dir, 'rwnsm')
+
+    if 'RM_DIR' not in os.environ:
+        # NOTE(review): other defaults use short plugin names (rwvns, rwnsm)
+        # but this one uses 'rwresmgrtasklet' -- confirm the directory name.
+        os.environ['RM_DIR'] = os.path.join(plugin_dir, 'rwresmgrtasklet')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    ManoTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    # Forward any unrecognized CLI arguments to unittest.
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw=4
diff --git a/rwlaunchpad/test/mgmt_recovery.py b/rwlaunchpad/test/mgmt_recovery.py
new file mode 100755
index 0000000..29f0ab0
--- /dev/null
+++ b/rwlaunchpad/test/mgmt_recovery.py
@@ -0,0 +1,385 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import logging
+import os
+import resource
+import socket
+import sys
+import subprocess
+import shlex
+import shutil
+import netifaces
+
+from rift.rwlib.util import certs
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.net
+import rift.vcs
+import rift.vcs.core as core
+import rift.vcs.demo
+import rift.vcs.vms
+
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.net
+
+from rift.vcs.ext import ClassProperty
+
+logger = logging.getLogger(__name__)
+
+
+class NsmTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a network services manager tasklet.
+    """
+
+    def __init__(self, name='network-services-manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a NsmTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+            config_ready    - whether the tasklet starts with config ready
+            recovery_action - VCS action taken on failure
+                              (default: FAILCRITICAL)
+            data_storetype  - data store backing state recovery
+                              (default: NOSTORE)
+        """
+        super(NsmTasklet, self).__init__(name=name, uid=uid,
+                                         config_ready=config_ready,
+                                         recovery_action=recovery_action,
+                                         data_storetype=data_storetype,
+                                        )
+
+    # Plugin install location and name used to load this tasklet
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwnsmtasklet')
+    plugin_name = ClassProperty('rwnsmtasklet')
+
+
+class VnsTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a virtual network service (VNS) tasklet.
+    """
+
+    def __init__(self, name='virtual-network-service', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a VnsTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+            config_ready    - whether the tasklet starts with config ready
+            recovery_action - VCS action taken on failure
+                              (default: FAILCRITICAL)
+            data_storetype  - data store backing state recovery
+                              (default: NOSTORE)
+        """
+        super(VnsTasklet, self).__init__(name=name, uid=uid,
+                                         config_ready=config_ready,
+                                         recovery_action=recovery_action,
+                                         data_storetype=data_storetype,
+                                        )
+
+    # Plugin install location and name used to load this tasklet
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnstasklet')
+    plugin_name = ClassProperty('rwvnstasklet')
+
+
+class VnfmTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a virtual network function manager tasklet.
+    """
+
+    def __init__(self, name='virtual-network-function-manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a VnfmTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+            config_ready    - whether the tasklet starts with config ready
+            recovery_action - VCS action taken on failure
+                              (default: FAILCRITICAL)
+            data_storetype  - data store backing state recovery
+                              (default: NOSTORE)
+        """
+        super(VnfmTasklet, self).__init__(name=name, uid=uid,
+                                          config_ready=config_ready,
+                                          recovery_action=recovery_action,
+                                          data_storetype=data_storetype,
+                                         )
+
+    # Plugin install location and name used to load this tasklet
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnfmtasklet')
+    plugin_name = ClassProperty('rwvnfmtasklet')
+
+
+class ResMgrTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a Resource Manager tasklet.
+    """
+
+    def __init__(self, name='Resource-Manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a ResMgrTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+            config_ready    - whether the tasklet starts with config ready
+            recovery_action - VCS action taken on failure
+                              (default: FAILCRITICAL)
+            data_storetype  - data store backing state recovery
+                              (default: NOSTORE)
+        """
+        super(ResMgrTasklet, self).__init__(name=name, uid=uid,
+                                            config_ready=config_ready,
+                                            recovery_action=recovery_action,
+                                            data_storetype=data_storetype,
+                                           )
+
+    # Plugin install location and name used to load this tasklet
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwresmgrtasklet')
+    plugin_name = ClassProperty('rwresmgrtasklet')
+
+
+class MonitorTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a tasklet that is used to monitor NFVI metrics.
+    """
+
+    def __init__(self, name='nfvi-metrics-monitor', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a MonitorTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+            config_ready    - whether the tasklet starts with config ready
+            recovery_action - VCS action taken on failure
+                              (default: FAILCRITICAL)
+            data_storetype  - data store backing state recovery
+                              (default: NOSTORE)
+
+        """
+        super(MonitorTasklet, self).__init__(name=name, uid=uid,
+                                             config_ready=config_ready,
+                                             recovery_action=recovery_action,
+                                             data_storetype=data_storetype,
+                                            )
+
+    # Plugin install location and name used to load this tasklet
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwmonitor')
+    plugin_name = ClassProperty('rwmonitor')
+
+
+def get_ui_ssl_args():
+    """Returns the SSL parameter string for launchpad UI processes"""
+
+    try:
+        use_ssl, certfile_path, keyfile_path = certs.get_bootstrap_cert_and_key()
+    except certs.BootstrapSslMissingException:
+        logger.error('No bootstrap certificates found.  Disabling UI SSL')
+        use_ssl = False
+
+    # If we're not using SSL, no SSL arguments are necessary.  Note the
+    # cert/key paths are unbound on the exception path, but that path always
+    # returns here before they are referenced.
+    if not use_ssl:
+        return ""
+
+    return "--enable-https --keyfile-path=%s --certfile-path=%s" % (keyfile_path, certfile_path)
+
+
+class UIServer(rift.vcs.NativeProcess):
+    """Native process wrapping the skyquake launchpad UI server."""
+
+    def __init__(self, name="RW.MC.UI",
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        super(UIServer, self).__init__(
+                name=name,
+                exe="./usr/share/rw.ui/skyquake/scripts/launch_ui.sh",
+                config_ready=config_ready,
+                recovery_action=recovery_action,
+                data_storetype=data_storetype,
+                )
+
+    @property
+    def args(self):
+        # SSL flags are computed at launch time from the bootstrap certs
+        return get_ui_ssl_args()
+
+
+class RedisServer(rift.vcs.NativeProcess):
+    """Native process wrapping the redis server used for tasklet recovery."""
+
+    def __init__(self, name="RW.Redis.Server",
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        super(RedisServer, self).__init__(
+                name=name,
+                exe="/usr/bin/redis-server",
+                config_ready=config_ready,
+                recovery_action=recovery_action,
+                data_storetype=data_storetype,
+                )
+
+    @property
+    def args(self):
+        # NOTE(review): conf path is relative — presumably resolved against
+        # the RIFT install root at launch; confirm working directory.
+        return "./usr/bin/active_redis.conf --port 9999"
+
+class ConfigManagerTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a Configuration Manager tasklet.
+    """
+
+    def __init__(self, name='Configuration-Manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a ConfigManagerTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+            config_ready    - whether the tasklet starts with config ready
+            recovery_action - VCS action taken on failure
+                              (default: FAILCRITICAL)
+            data_storetype  - data store backing state recovery
+                              (default: NOSTORE)
+        """
+        super(ConfigManagerTasklet, self).__init__(name=name, uid=uid,
+                                                   config_ready=config_ready,
+                                                   recovery_action=recovery_action,
+                                                   data_storetype=data_storetype,
+                                                  )
+
+    # Plugin install location and name used to load this tasklet
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwconmantasklet')
+    plugin_name = ClassProperty('rwconmantasklet')
+
+
+class Demo(rift.vcs.demo.Demo):
+    """Launchpad management-recovery demo system.
+
+    Builds a single-colony system description.  When two management IPs are
+    supplied, a second VM is added as a standby (start=False) for recovery
+    testing; with a single IP only the active VM is created.
+    """
+
+    def __init__(self,mgmt_ip_list):
+
+        # Processes started on the active management VM
+        procs = [
+            ConfigManagerTasklet(),
+            UIServer(),
+            RedisServer(),
+            rift.vcs.RestPortForwardTasklet(),
+            rift.vcs.RestconfTasklet(),
+            rift.vcs.RiftCli(),
+            rift.vcs.uAgentTasklet(),
+            rift.vcs.Launchpad(),
+            ]
+
+        # Processes brought up on the standby VM (uAgent in non-active mode)
+        standby_procs = [
+            RedisServer(),
+            rift.vcs.uAgentTasklet(mode_active=False),
+            ]
+
+        # Tasklets restarted on failure with state recovered from Redis
+        restart_procs = [
+            VnfmTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=core.DataStore.REDIS.value),
+            VnsTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=core.DataStore.REDIS.value),
+            MonitorTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=core.DataStore.REDIS.value),
+            NsmTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=core.DataStore.REDIS.value),
+            ResMgrTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=core.DataStore.REDIS.value),
+            ]
+        super(Demo, self).__init__(
+            # Construct the system. This system consists of 1 cluster in 1
+            # colony. The master cluster houses CLI and management VMs
+            sysinfo = rift.vcs.SystemInfo(
+                    zookeeper=rift.vcs.manifest.RaZookeeper(zake=False, master_ip=mgmt_ip_list[0]),
+                    colonies=[
+                            rift.vcs.Colony(
+                                name='master',
+                                uid=1,
+                                clusters=[
+                                    rift.vcs.VirtualMachine(
+                                        name='vm-templ-1',
+                                        ip=mgmt_ip_list[0],
+                                        procs=procs,
+                                        restart_procs=restart_procs,
+                                        ),
+                                    rift.vcs.VirtualMachine(
+                                        name='vm-templ-2',
+                                        ip=mgmt_ip_list[1],
+                                        standby_procs=standby_procs,
+                                        start=False,
+                                        ),
+                                    ] if len(mgmt_ip_list) == 2 else [
+                                    rift.vcs.VirtualMachine(
+                                        name='vm-templ-1',
+                                        ip=mgmt_ip_list[0],
+                                        procs=procs,
+                                        restart_procs=restart_procs,
+                                        ),
+                                    ]
+                                )
+                            ],
+                        ),
+
+            # Define the generic portmap.
+            port_map = {},
+
+            # Define a mapping from the placeholder logical names to the real
+            # port names for each of the different modes supported by this demo.
+            port_names = {
+                'ethsim': {
+                },
+                'pci': {
+                }
+            },
+
+            # Define the connectivity between logical port names.
+            port_groups = {},
+        )
+
+
+def main(argv=sys.argv[1:]):
+    """Parse demo arguments, build the Demo system and start it."""
+    logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s')
+
+    # Create a parser which includes all generic demo arguments
+    parser = rift.vcs.demo.DemoArgParser()
+
+    args = parser.parse_args(argv)
+
+    # Disable loading any kernel modules for the launchpad VM
+    # since it doesn't need it and it will fail within containers
+    os.environ["NO_KERNEL_MODS"] = "1"
+
+    # Remove the persistent DTS recovery files so each run starts clean
+    for f in os.listdir(os.environ["INSTALLDIR"]):
+        if f.endswith(".db"):
+            os.remove(os.path.join(os.environ["INSTALLDIR"], f))
+
+    #load demo info and create Demo object
+    demo = Demo(args.mgmt_ip_list)
+
+    # Create the prepared system from the demo
+    system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args, 
+              northbound_listing="cli_launchpad_schema_listing.txt",
+              netconf_trace_override=True)
+
+    # Prefer the eth0 address for the confd sink; fall back to the address
+    # resolved from the local hostname.  NOTE(review): 'eth0' is hardcoded —
+    # confirm this interface name exists on all target hosts.
+    confd_ip = socket.gethostbyname(socket.gethostname())
+    intf = netifaces.ifaddresses('eth0')
+    if intf and netifaces.AF_INET in intf and len(intf[netifaces.AF_INET]):
+       confd_ip = intf[netifaces.AF_INET][0]['addr']
+    rift.vcs.logger.configure_sink(config_file=None, confd_ip=confd_ip)
+
+    # Start the prepared system
+    system.start()
+
+
+if __name__ == "__main__":
+    # Allow unlimited-size core dumps for post-mortem debugging
+    resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY) )
+    try:
+        main()
+    except rift.vcs.demo.ReservationError:
+        print("ERROR: unable to retrieve a list of IP addresses from the reservation system")
+        sys.exit(1)
+    except rift.vcs.demo.MissingModeError:
+        print("ERROR: you need to provide a mode to run the script")
+        sys.exit(1)
+    finally:
+        # Restore terminal settings the CLI process may have modified
+        os.system("stty sane")
diff --git a/rwlaunchpad/test/pytest/lp_kt_utm_test.py b/rwlaunchpad/test/pytest/lp_kt_utm_test.py
new file mode 100644
index 0000000..0a8d6ba
--- /dev/null
+++ b/rwlaunchpad/test/pytest/lp_kt_utm_test.py
@@ -0,0 +1,306 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+import gi
+
+gi.require_version('RwlogMgmtYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+
+from gi.repository import (
+        NsdYang,
+        NsrYang,
+        RwBaseYang,
+        RwCloudYang,
+        RwIwpYang,
+        RwlogMgmtYang,
+        RwNsmYang,
+        RwNsrYang,
+        RwResourceMgrYang,
+        RwConmanYang,
+        RwVnfdYang,
+        VldYang,
+        )
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+# Install location of the KT UTM VNF descriptor package
+RW_KT_UTM_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/kt_utm"
+    )
+
+# Install location of the UTM-only NS descriptor package
+RW_KT_UTM_NSD_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/nsds/utm_only"
+    )
+
+
+class PackageError(Exception):
+    """Raised when a required descriptor package is not found on disk."""
+    pass
+
+
+def raise_package_error():
+    """Raise a PackageError indicating the ns packages are missing."""
+    raise PackageError("Could not find ns packages")
+
+
+# ---------------------------------------------------------------------------
+# Proxy fixtures: each returns a module-scoped management-session proxy bound
+# to the named YANG model.
+# ---------------------------------------------------------------------------
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+
+@pytest.fixture(scope='module')
+def rwlog_mgmt_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwlogMgmtYang)
+
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwResourceMgrYang)
+
+
+@pytest.fixture(scope='module')
+def cloud_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+
+@pytest.fixture(scope='module')
+def so_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+
+@pytest.fixture(scope='module')
+def nsm_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsmYang)
+
+
+# Session-scoped fixtures returning paths to the descriptor package tarballs;
+# they raise PackageError if the expected tarball is not installed.
+@pytest.fixture(scope='session')
+def kt_utm_vnfd_package_file():
+    ktutm_pkg_file = os.path.join(
+            RW_KT_UTM_PKG_INSTALL_DIR,
+            "kt_utm_vnfd.tar.gz",
+            )
+    if not os.path.exists(ktutm_pkg_file):
+        raise_package_error()
+
+    return ktutm_pkg_file
+
+# NOTE(review): this fixture uses 6-space indentation unlike the rest of the
+# file; legal Python, but worth normalizing in a follow-up.
+@pytest.fixture(scope='session')
+def utm_only_nsd_package_file():
+      ktutm_nsd_pkg_file = os.path.join(
+              RW_KT_UTM_NSD_PKG_INSTALL_DIR,
+              "utm_only_nsd.tar.gz",
+              )
+      if not os.path.exists(ktutm_nsd_pkg_file):
+          raise_package_error()
+  
+      return ktutm_nsd_pkg_file
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    """Upload a descriptor package via the launchpad REST API.
+
+    Shells out to curl to POST the file to /api/upload and returns the
+    transaction id parsed from the JSON response.
+    """
+    curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    """Raised when a descriptor onboard transaction fails or times out."""
+    pass
+
+
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+    """Poll the upload-state API until the onboard transaction completes.
+
+    Returns on success; raises DescriptorOnboardError on failure or timeout.
+    NOTE(review): if timeout_secs <= 0 the loop body never runs and 'state'
+    is referenced unbound below — confirm callers always pass a positive
+    timeout.
+    """
+    logger.info("Waiting for onboard trans_id %s to complete",
+                transaction_id)
+    start_time = time.time()
+    while (time.time() - start_time) < timeout_secs:
+        r = requests.get(
+                'http://{host}:4567/api/upload/{t_id}/state'.format(
+                    host=host, t_id=transaction_id
+                    )
+                )
+        state = r.json()
+        if state["status"] == "pending":
+            time.sleep(1)
+            continue
+
+        elif state["status"] == "success":
+            logger.info("Descriptor onboard was successful")
+            return
+
+        else:
+            raise DescriptorOnboardError(state)
+
+    # Timed out while still pending: report the last observed state
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+def create_nsr_from_nsd_id(nsd_id):
+      """Build an enabled UTM-only NS instance config referencing nsd_id."""
+      nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+      nsr.id = str(uuid.uuid4())
+      nsr.name = "UTM-only"
+      nsr.short_name = "UTM-only"
+      nsr.description = "1 VNFs with 5 VLs"
+      nsr.nsd_ref = nsd_id
+      nsr.admin_status = "ENABLED"
+  
+      return nsr
+
+@pytest.mark.incremental
+class TestLaunchpadStartStop(object):
+    """Incremental end-to-end test: configure the launchpad, onboard the
+    KT UTM VNFD and UTM-only NSD, then instantiate the NS."""
+
+    def test_configure_logging(self, rwlog_mgmt_proxy):
+        # Limit console logging to errors for the rw-generic category
+        logging = RwlogMgmtYang.Logging.from_dict({
+                "console": {
+                    "on": True,
+                    "filter": {
+                        "category": [{
+                            "name": "rw-generic",
+                            "severity": "error"
+                            }],
+                        }
+                    }
+                })
+        rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging)
+
+    def test_configure_cloud_account(self, cloud_proxy, logger):
+        # NOTE(review): hardcoded openstack credentials/endpoint — test-lab
+        # specific; parameterize before wider use.
+        cloud_account = RwCloudYang.CloudAccountConfig()
+        # cloud_account.name = "cloudsim_proxy"
+        # cloud_account.account_type = "cloudsim_proxy"
+        cloud_account.name = "openstack"
+        cloud_account.account_type = "openstack"
+        cloud_account.openstack.key = 'pluto'
+        cloud_account.openstack.secret = 'mypasswd'
+        cloud_account.openstack.auth_url = 'http://10.66.4.13:5000/v3/'
+        cloud_account.openstack.tenant = 'demo'
+        cloud_account.openstack.mgmt_network = 'private'
+
+        cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+
+    def test_configure_pools(self, resource_mgr_proxy):
+        # Create dynamic compute and network resource pools
+        pools = RwResourceMgrYang.ResourcePools.from_dict({
+            "pools": [{ "name": "vm_pool_a",
+                        "resource_type": "compute",
+                        "pool_type" : "dynamic"},
+                      {"name": "network_pool_a",
+                       "resource_type": "network",
+                       "pool_type" : "dynamic",}]})
+
+        resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+
+    def test_configure_resource_orchestrator(self, so_proxy):
+        # Point conman at the local RO endpoint
+        cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
+                                                'ro_port'      :  2022,
+                                                'ro_username'  : 'admin',
+                                                'ro_password'  : 'admin'})
+        so_proxy.merge_config('/rw-conman:cm-config', cfg)
+
+    def test_configure_service_orchestrator(self, nsm_proxy):
+        # Point the NSM at the local conman endpoint
+        cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
+                                              'cm_port'      :  2022,
+                                              'cm_username'  : 'admin',
+                                              'cm_password'  : 'admin'})
+        nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+
+    
+    def test_onboard_ktutm_vnfd(self, logger, vnfd_proxy, kt_utm_vnfd_package_file):
+        # Upload the VNFD package and verify it appears in the catalog
+        logger.info("Onboarding kt_utm_vnfd package: %s", kt_utm_vnfd_package_file)
+        trans_id = upload_descriptor(logger, kt_utm_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should only be a single vnfd"
+        vnfd = vnfds[0]
+        assert vnfd.name == "kt_utm_vnfd"
+
+    def test_onboard_utm_only_nsd(self, logger, nsd_proxy, utm_only_nsd_package_file):
+          # Upload the NSD package and verify it appears in the catalog
+          logger.info("Onboarding utm_onlynsd package: %s", utm_only_nsd_package_file)
+          trans_id = upload_descriptor(logger, utm_only_nsd_package_file)
+          wait_unboard_transaction_finished(logger, trans_id)
+  
+          catalog = nsd_proxy.get_config('/nsd-catalog')
+          nsds = catalog.nsd
+          assert len(nsds) == 1, "There should only be a single nsd"
+          nsd = nsds[0]
+  
+    def test_instantiate_utm_only_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+          # Instantiate the NS from the onboarded NSD and verify opdata
+          catalog = nsd_proxy.get_config('/nsd-catalog')
+          nsd = catalog.nsd[0]
+  
+          nsr = create_nsr_from_nsd_id(nsd.id)
+          nsr_proxy.merge_config('/ns-instance-config', nsr)
+  
+          nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+          nsrs = nsr_opdata.nsr
+          assert len(nsrs) == 1
+          assert nsrs[0].ns_instance_config_ref == nsr.id
diff --git a/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py b/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py
new file mode 100644
index 0000000..705565b
--- /dev/null
+++ b/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py
@@ -0,0 +1,333 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+import gi
+
+gi.require_version('RwlogMgmtYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+
+from gi.repository import (
+        NsdYang,
+        NsrYang,
+        RwBaseYang,
+        RwCloudYang,
+        RwIwpYang,
+        RwlogMgmtYang,
+        RwNsmYang,
+        RwNsrYang,
+        RwResourceMgrYang,
+        RwConmanYang,
+        RwVnfdYang,
+        VldYang,
+        )
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+# Install location of the KT UTM VNF descriptor package
+RW_KT_UTM_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/kt_utm"
+    )
+
+# Install location of the KT WIMS VNF descriptor package
+RW_KT_WIMS_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/kt_wims"
+    )
+
+# Install location of the UTM+WIMS NS descriptor package
+RW_KT_UTM_WIMS_NSD_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/nsds/utm_wims"
+    )
+
+
+class PackageError(Exception):
+    """Raised when a required descriptor package is not found on disk."""
+    pass
+
+
+def raise_package_error():
+    """Raise a PackageError indicating the ns packages are missing."""
+    raise PackageError("Could not find ns packages")
+
+
+# ---------------------------------------------------------------------------
+# Proxy fixtures: each returns a module-scoped management-session proxy bound
+# to the named YANG model.
+# ---------------------------------------------------------------------------
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+
+@pytest.fixture(scope='module')
+def rwlog_mgmt_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwlogMgmtYang)
+
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwResourceMgrYang)
+
+
+@pytest.fixture(scope='module')
+def cloud_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+
+@pytest.fixture(scope='module')
+def so_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+
+@pytest.fixture(scope='module')
+def nsm_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsmYang)
+
+
+# Session-scoped fixtures returning paths to the descriptor package tarballs;
+# they raise PackageError if the expected tarball is not installed.
+@pytest.fixture(scope='session')
+def kt_utm_vnfd_package_file():
+    ktutm_pkg_file = os.path.join(
+            RW_KT_UTM_PKG_INSTALL_DIR,
+            "kt_utm_vnfd.tar.gz",
+            )
+    if not os.path.exists(ktutm_pkg_file):
+        raise_package_error()
+
+    return ktutm_pkg_file
+
+@pytest.fixture(scope='session')
+def kt_wims_vnfd_package_file():
+    ktwims_pkg_file = os.path.join(
+            RW_KT_WIMS_PKG_INSTALL_DIR,
+            "kt_wims_vnfd.tar.gz",
+            )
+    if not os.path.exists(ktwims_pkg_file):
+        raise_package_error()
+
+    return ktwims_pkg_file
+
+# NOTE(review): this fixture uses 6-space indentation unlike the rest of the
+# file; legal Python, but worth normalizing in a follow-up.
+@pytest.fixture(scope='session')
+def utm_wims_nsd_package_file():
+      ktutm_wims_nsd_pkg_file = os.path.join(
+              RW_KT_UTM_WIMS_NSD_PKG_INSTALL_DIR,
+              "utm_wims_nsd.tar.gz",
+              )
+      if not os.path.exists(ktutm_wims_nsd_pkg_file):
+          raise_package_error()
+  
+      return ktutm_wims_nsd_pkg_file
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    """Upload a descriptor package via the launchpad REST API.
+
+    Shells out to curl to POST the file to /api/upload and returns the
+    transaction id parsed from the JSON response.
+    """
+    curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    """Raised when a descriptor onboard transaction fails or times out."""
+    pass
+
+
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+    """Poll the upload-state API until the onboard transaction completes.
+
+    Returns on success; raises DescriptorOnboardError on failure or timeout.
+    NOTE(review): if timeout_secs <= 0 the loop body never runs and 'state'
+    is referenced unbound below — confirm callers always pass a positive
+    timeout.
+    """
+    logger.info("Waiting for onboard trans_id %s to complete",
+                transaction_id)
+    start_time = time.time()
+    while (time.time() - start_time) < timeout_secs:
+        r = requests.get(
+                'http://{host}:4567/api/upload/{t_id}/state'.format(
+                    host=host, t_id=transaction_id
+                    )
+                )
+        state = r.json()
+        if state["status"] == "pending":
+            time.sleep(1)
+            continue
+
+        elif state["status"] == "success":
+            logger.info("Descriptor onboard was successful")
+            return
+
+        else:
+            raise DescriptorOnboardError(state)
+
+    # Timed out while still pending: report the last observed state
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+def create_nsr_from_nsd_id(nsd_id):
+      """Build an enabled UTM-WIMS NS instance config referencing nsd_id."""
+      nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+      nsr.id = str(uuid.uuid4())
+      nsr.name = "UTM-WIMS"
+      nsr.short_name = "UTM-WIMS"
+      nsr.description = "2 VNFs with 4 VLs"
+      nsr.nsd_ref = nsd_id
+      nsr.admin_status = "ENABLED"
+  
+      return nsr
+
+@pytest.mark.incremental
+class TestLaunchpadStartStop(object):
+    """Incremental launchpad test for the KT UTM/WIMS network service.
+
+    Configures logging, the cloud account, resource pools and the RO/SO
+    endpoints, onboards the two VNFDs and the NSD, then instantiates the
+    NSR.  With @pytest.mark.incremental a failing step causes the
+    remaining steps to be skipped.
+    """
+    def test_configure_logging(self, rwlog_mgmt_proxy):
+        # Console logging on; the rw-generic category limited to errors.
+        logging = RwlogMgmtYang.Logging.from_dict({
+                "console": {
+                    "on": True,
+                    "filter": {
+                        "category": [{
+                            "name": "rw-generic",
+                            "severity": "error"
+                            }],
+                        }
+                    }
+                })
+        rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging)
+
+    def test_configure_cloud_account(self, cloud_proxy, logger):
+        cloud_account = RwCloudYang.CloudAccountConfig()
+        # cloud_account.name = "cloudsim_proxy"
+        # cloud_account.account_type = "cloudsim_proxy"
+        cloud_account.name = "openstack"
+        cloud_account.account_type = "openstack"
+        cloud_account.openstack.key = 'pluto'
+        cloud_account.openstack.secret = 'mypasswd'
+        # NOTE(review): '10.66.4.xx' is a placeholder, not a routable
+        # address -- replace with a real keystone URL before running
+        # against a live openstack.
+        cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/'
+        cloud_account.openstack.tenant = 'demo'
+        cloud_account.openstack.mgmt_network = 'private'
+
+        cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+
+    def test_configure_pools(self, resource_mgr_proxy):
+        # One dynamic compute pool and one dynamic network pool.
+        pools = RwResourceMgrYang.ResourcePools.from_dict({
+            "pools": [{ "name": "vm_pool_a",
+                        "resource_type": "compute",
+                        "pool_type" : "dynamic"},
+                      {"name": "network_pool_a",
+                       "resource_type": "network",
+                       "pool_type" : "dynamic",}]})
+
+        resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+
+    def test_configure_resource_orchestrator(self, so_proxy):
+        # Point conman at the co-located RO endpoint (netconf port 2022).
+        cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
+                                                'ro_port'      :  2022,
+                                                'ro_username'  : 'admin',
+                                                'ro_password'  : 'admin'})
+        so_proxy.merge_config('/rw-conman:cm-config', cfg)
+
+    def test_configure_service_orchestrator(self, nsm_proxy):
+        # Mirror configuration: point the NSM at the conman endpoint.
+        cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
+                                              'cm_port'      :  2022,
+                                              'cm_username'  : 'admin',
+                                              'cm_password'  : 'admin'})
+        nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+
+    def test_onboard_ktutm_vnfd(self, logger, vnfd_proxy, kt_utm_vnfd_package_file):
+        logger.info("Onboarding kt_utm_vnfd package: %s", kt_utm_vnfd_package_file)
+        trans_id = upload_descriptor(logger, kt_utm_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        # The catalog must now contain exactly the VNFD just onboarded.
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should only be a single vnfd"
+        vnfd = vnfds[0]
+        assert vnfd.name == "kt_utm_vnfd"
+
+    def test_onboard_ktwims_vnfd(self, logger, vnfd_proxy, kt_wims_vnfd_package_file):
+        logger.info("Onboarding kt_wims_vnfd package: %s", kt_wims_vnfd_package_file)
+        trans_id = upload_descriptor(logger, kt_wims_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should only be two vnfd"
+        assert "kt_wims_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_utm_wims_nsd(self, logger, nsd_proxy, utm_wims_nsd_package_file):
+        logger.info("Onboarding utm_wims_nsd package: %s", utm_wims_nsd_package_file)
+        trans_id = upload_descriptor(logger, utm_wims_nsd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+
+    def test_instantiate_utm_wims_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        nsr = create_nsr_from_nsd_id(nsd.id)
+        # NOTE(review): the ping/pong variant of this test pushes the NSR
+        # through rwnsr_proxy instead of nsr_proxy -- confirm which proxy
+        # this launchpad build expects for /ns-instance-config.
+        nsr_proxy.merge_config('/ns-instance-config', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
diff --git a/rwlaunchpad/test/pytest/lp_test.py b/rwlaunchpad/test/pytest/lp_test.py
new file mode 100644
index 0000000..b987b35
--- /dev/null
+++ b/rwlaunchpad/test/pytest/lp_test.py
@@ -0,0 +1,390 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+import datetime
+
+import gi
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwlogMgmtYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+
+from gi.repository import (
+        NsdYang,
+        NsrYang,
+        RwBaseYang,
+        RwCloudYang,
+        RwIwpYang,
+        RwlogMgmtYang,
+        RwNsmYang,
+        RwNsrYang,
+        RwResourceMgrYang,
+        RwConmanYang,
+        RwVnfdYang,
+        VldYang,
+        )
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+# Directory holding the prebuilt ping/pong descriptor tarballs.  Requires
+# the RIFT_ROOT environment variable; a missing variable raises KeyError
+# at import time.
+RW_PING_PONG_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_ROOT"],
+    "images"
+    )
+
+class PackageError(Exception):
+    """Raised when an expected descriptor package file is missing."""
+    pass
+
+
+def raise_package_error():
+    """Fail fast when a descriptor package cannot be located on disk."""
+    raise PackageError("Could not find ns packages")
+
+
+# ---------------------------------------------------------------------------
+# Module-scoped proxy fixtures.  Each returns a mgmt_session proxy bound to
+# one YANG module; the mgmt_session fixture itself is provided by shared
+# pytest plumbing (conftest), not by this file.
+# ---------------------------------------------------------------------------
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+
+@pytest.fixture(scope='module')
+def rwlog_mgmt_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwlogMgmtYang)
+
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwResourceMgrYang)
+
+
+@pytest.fixture(scope='module')
+def cloud_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+
+@pytest.fixture(scope='module')
+def so_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+
+@pytest.fixture(scope='module')
+def nsm_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsmYang)
+
+
+# ---------------------------------------------------------------------------
+# Session-scoped package fixtures: resolve the descriptor tarballs under
+# $RIFT_ROOT/images and raise PackageError if any is absent.
+# ---------------------------------------------------------------------------
+@pytest.fixture(scope='session')
+def ping_vnfd_package_file():
+    ping_pkg_file = os.path.join(
+            RW_PING_PONG_PKG_INSTALL_DIR,
+            "ping_vnfd_with_image.tar.gz",
+            )
+    if not os.path.exists(ping_pkg_file):
+        raise_package_error()
+
+    return ping_pkg_file
+
+
+@pytest.fixture(scope='session')
+def pong_vnfd_package_file():
+    pong_pkg_file = os.path.join(
+            RW_PING_PONG_PKG_INSTALL_DIR,
+            "pong_vnfd_with_image.tar.gz",
+            )
+    if not os.path.exists(pong_pkg_file):
+        raise_package_error()
+
+    return pong_pkg_file
+
+
+@pytest.fixture(scope='session')
+def ping_pong_nsd_package_file():
+    ping_pong_pkg_file = os.path.join(
+            RW_PING_PONG_PKG_INSTALL_DIR,
+            "ping_pong_nsd.tar.gz",
+            )
+    if not os.path.exists(ping_pong_pkg_file):
+        raise_package_error()
+
+    return ping_pong_pkg_file
+
+
+def create_nsr_from_nsd_id(nsd_id):
+    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+    nsr.id = str(uuid.uuid4())
+    nsr.name = "pingpong_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
+    nsr.short_name = "nsr_short_name"
+    nsr.description = "This is a description"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+    nsr.cloud_account = "openstack"
+
+    param = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
+    param.xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:vendor'
+    param.value = "rift-o-matic"
+
+    nsr.input_parameter.append(param)
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    """Raised when a descriptor onboard transaction fails or times out."""
+    pass
+
+
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete",
+                transaction_id)
+    start_time = time.time()
+    while (time.time() - start_time) < timeout_secs:
+        r = requests.get(
+                'http://{host}:4567/api/upload/{t_id}/state'.format(
+                    host=host, t_id=transaction_id
+                    )
+                )
+        state = r.json()
+        if state["status"] == "pending":
+            time.sleep(1)
+            continue
+
+        elif state["status"] == "success":
+            logger.info("Descriptor onboard was successful")
+            return
+
+        else:
+            raise DescriptorOnboardError(state)
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+
+@pytest.mark.incremental
+class TestLaunchpadStartStop(object):
+    def test_configure_logging(self, rwlog_mgmt_proxy):
+        logging = RwlogMgmtYang.Logging.from_dict({
+                "console": {
+                    "on": True,
+                    "filter": {
+                        "category": [{
+                            "name": "rw-generic",
+                            "severity": "error"
+                            }],
+                        }
+                    }
+                })
+        rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging)
+
+    def test_configure_cloud_account(self, cloud_proxy, logger):
+        cloud_account = RwCloudYang.CloudAccount()
+        # cloud_account.name = "cloudsim_proxy"
+        # cloud_account.account_type = "cloudsim_proxy"
+        cloud_account.name = "openstack"
+        cloud_account.account_type = "openstack"
+        cloud_account.openstack.key = 'pluto'
+        cloud_account.openstack.secret = 'mypasswd'
+        cloud_account.openstack.auth_url = 'http://10.96.4.2:5000/v3/'
+        cloud_account.openstack.tenant = 'mano1'
+        cloud_account.openstack.mgmt_network = 'private1'
+
+        cloud_proxy.merge_config("/rw-cloud:cloud/account", cloud_account)
+
+    def test_onboard_ping_vnfd(self, logger, vnfd_proxy, ping_vnfd_package_file):
+        logger.info("Onboarding ping_vnfd package: %s", ping_vnfd_package_file)
+        trans_id = upload_descriptor(logger, ping_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should only be a single vnfd"
+        vnfd = vnfds[0]
+        assert vnfd.name == "ping_vnfd"
+
+    def test_onboard_pong_vnfd(self, logger, vnfd_proxy, pong_vnfd_package_file):
+        logger.info("Onboarding pong_vnfd package: %s", pong_vnfd_package_file)
+        trans_id = upload_descriptor(logger, pong_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_ping_pong_nsd(self, logger, nsd_proxy, ping_pong_nsd_package_file):
+        logger.info("Onboarding ping_pong_nsd package: %s", ping_pong_nsd_package_file)
+        trans_id = upload_descriptor(logger, ping_pong_nsd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "ping_pong_nsd"
+
+    def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        nsr = create_nsr_from_nsd_id(nsd.id)
+        rwnsr_proxy.merge_config('/ns-instance-config', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+        # logger.info("Waiting up to 30 seconds for ping and pong components to show "
+        #          "up in show tasklet info")
+
+        # start_time = time.time()
+        # while (time.time() - start_time) < 30:
+        #     vcs_info = base_proxy.get('/vcs/info')
+        #     components = vcs_info.components.component_info
+
+        #     def find_component_by_name(name):
+        #         for component in components:
+        #             if name in component.component_name:
+        #                 return component
+
+        #         logger.warning("Did not find %s component name in show tasklet info",
+        #                     name)
+
+        #         return None
+
+        #     """
+        #     ping_cluster_component = find_component_by_name(
+        #             "rw_ping_vnfd:rwping_cluster"
+        #             )
+        #     if ping_cluster_component is None:
+        #         continue
+
+        #     pong_cluster_component = find_component_by_name(
+        #             "rw_pong_vnfd:rwpong_cluster"
+        #             )
+        #     if pong_cluster_component is None:
+        #         continue
+        #     """
+
+        #     ping_vm_component = find_component_by_name(
+        #             "rw_ping_vnfd:rwping_vm"
+        #             )
+        #     if ping_vm_component is None:
+        #         continue
+
+        #     pong_vm_component = find_component_by_name(
+        #             "rw_pong_vnfd:rwpong_vm"
+        #             )
+        #     if pong_vm_component is None:
+        #         continue
+
+        #     ping_proc_component = find_component_by_name(
+        #             "rw_ping_vnfd:rwping_proc"
+        #             )
+        #     if ping_proc_component is None:
+        #         continue
+
+        #     pong_proc_component = find_component_by_name(
+        #             "rw_pong_vnfd:rwpong_proc"
+        #             )
+        #     if pong_proc_component is None:
+        #         continue
+
+        #     ping_tasklet_component = find_component_by_name(
+        #             "rw_ping_vnfd:rwping_tasklet"
+        #             )
+        #     if ping_tasklet_component is None:
+        #         continue
+
+        #     pong_tasklet_component = find_component_by_name(
+        #             "rw_pong_vnfd:rwpong_tasklet"
+        #             )
+        #     if pong_tasklet_component is None:
+        #         continue
+
+        #     logger.info("TEST SUCCESSFUL: All ping and pong components were found in show tasklet info")
+        #     break
+
+        # else:
+        #     assert False, "Did not find all ping and pong component in time"
+
+    #def test_terminate_ping_pong_ns(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+    #    nsr_configs = nsr_proxy.get_config('/ns-instance-config')
+    #    nsr = nsr_configs.nsr[0]
+    #    nsr_id = nsr.id
+
+    #    nsr_configs = nsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(nsr_id))
diff --git a/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py
new file mode 100644
index 0000000..16a8990
--- /dev/null
+++ b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py
@@ -0,0 +1,325 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_tg_2vrouter_ts_epa_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test ExtVNF
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+
+import gi
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+
+
+
+from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+# Directories holding the prebuilt VNFD/NSD descriptor tarballs under the
+# installed tree.  Requires the RIFT_INSTALL environment variable; a
+# missing variable raises KeyError at import time.
+RW_VROUTER_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/vrouter"
+    )
+RW_TRAFGEN_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/trafgen"
+    )
+RW_TRAFSINK_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/trafsink"
+    )
+RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/nsds/tg_2vrouter_ts"
+    )
+
+
+class PackageError(Exception):
+    """Raised when an expected descriptor package file is missing."""
+    pass
+
+
+def raise_package_error():
+    """Fail fast when a descriptor package cannot be located on disk."""
+    raise PackageError("Could not find ns packages")
+
+
+# ---------------------------------------------------------------------------
+# Module-scoped proxy fixtures.  Each returns a mgmt_session proxy bound to
+# one YANG module; the mgmt_session fixture itself is provided by shared
+# pytest plumbing (conftest), not by this file.
+# ---------------------------------------------------------------------------
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwResourceMgrYang)
+
+
+@pytest.fixture(scope='module')
+def cloud_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+@pytest.fixture(scope='module')
+def so_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+@pytest.fixture(scope='module')
+def nsm_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsmYang)
+
+# ---------------------------------------------------------------------------
+# Session-scoped package fixtures: resolve the EPA descriptor tarballs from
+# the install tree and raise PackageError if any is absent.
+# ---------------------------------------------------------------------------
+@pytest.fixture(scope='session')
+def vrouter_vnfd_package_file():
+    vrouter_pkg_file = os.path.join(
+            RW_VROUTER_PKG_INSTALL_DIR,
+            "vrouter_vnfd_with_epa.tar.gz",
+            )
+    if not os.path.exists(vrouter_pkg_file):
+        raise_package_error()
+
+    return vrouter_pkg_file
+
+@pytest.fixture(scope='session')
+def tg_vnfd_package_file():
+    tg_pkg_file = os.path.join(
+            RW_TRAFGEN_PKG_INSTALL_DIR,
+            "trafgen_vnfd_with_epa.tar.gz",
+            )
+    if not os.path.exists(tg_pkg_file):
+        raise_package_error()
+
+    return tg_pkg_file
+
+@pytest.fixture(scope='session')
+def ts_vnfd_package_file():
+    ts_pkg_file = os.path.join(
+            RW_TRAFSINK_PKG_INSTALL_DIR,
+            "trafsink_vnfd_with_epa.tar.gz",
+            )
+    if not os.path.exists(ts_pkg_file):
+        raise_package_error()
+
+    return ts_pkg_file
+
+@pytest.fixture(scope='session')
+def tg_2vrouter_ts_nsd_package_file():
+    tg_2vrouter_ts_nsd_pkg_file = os.path.join(
+            RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR,
+            "tg_2vrouter_ts_nsd_with_epa.tar.gz",
+            )
+    if not os.path.exists(tg_2vrouter_ts_nsd_pkg_file):
+        raise_package_error()
+
+    return tg_2vrouter_ts_nsd_pkg_file
+
+
+def create_nsr_from_nsd_id(nsd_id):
+    nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+    nsr.id = str(uuid.uuid4())
+    nsr.name = "TG-2Vrouter-TS EPA"
+    nsr.short_name = "TG-2Vrouter-TS EPA"
+    nsr.description = "4 VNFs with Trafgen, 2 Vrouters and Trafsink EPA"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    """Raised when a descriptor onboard transaction fails or times out."""
+    pass
+
+
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete",
+             transaction_id)
+    start_time = time.time()
+    while (time.time() - start_time) < timeout_secs:
+        r = requests.get(
+                'http://{host}:4567/api/upload/{t_id}/state'.format(
+                    host=host, t_id=transaction_id
+                    )
+                )
+        state = r.json()
+        if state["status"] == "pending":
+            time.sleep(1)
+            continue
+
+        elif state["status"] == "success":
+            logger.info("Descriptor onboard was successful")
+            return
+
+        else:
+            raise DescriptorOnboardError(state)
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+@pytest.mark.incremental
+class TestLaunchpadStartStop(object):
+    """Incremental launchpad test for the TG / 2x vrouter / TS EPA service.
+
+    Configures the cloud account, pools and RO/SO endpoints, onboards the
+    three VNFDs and the NSD, then instantiates the NSR.  With
+    @pytest.mark.incremental a failing step skips the remaining steps.
+    """
+    def test_configure_cloud_account(self, cloud_proxy, logger):
+        cloud_account = RwCloudYang.CloudAccountConfig()
+        #cloud_account.name = "cloudsim_proxy"
+        #cloud_account.account_type = "cloudsim_proxy"
+        cloud_account.name = "riftuser1"
+        cloud_account.account_type = "openstack"
+        cloud_account.openstack.key = 'pluto'
+        cloud_account.openstack.secret = 'mypasswd'
+        # NOTE(review): '10.66.4.xx' is a placeholder, not a routable
+        # address -- replace with a real keystone URL before running
+        # against a live openstack.
+        cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/'
+        cloud_account.openstack.tenant = 'demo'
+        cloud_account.openstack.mgmt_network = 'private'
+
+        cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+
+    def test_configure_pools(self, resource_mgr_proxy):
+        # One dynamic compute pool and one dynamic network pool.
+        pools = RwResourceMgrYang.ResourcePools.from_dict({
+            "pools": [{ "name": "vm_pool_a",
+                        "resource_type": "compute",
+                        "pool_type" : "dynamic"},
+                      {"name": "network_pool_a",
+                       "resource_type": "network",
+                       "pool_type" : "dynamic",}]})
+
+        resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+
+    def test_configure_resource_orchestrator(self, so_proxy):
+        # Point conman at the co-located RO endpoint (netconf port 2022).
+        cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
+                                                'ro_port'      :  2022,
+                                                'ro_username'  : 'admin',
+                                                'ro_password'  : 'admin'})
+        so_proxy.merge_config('/rw-conman:cm-config', cfg)
+
+    def test_configure_service_orchestrator(self, nsm_proxy):
+        # Mirror configuration: point the NSM at the conman endpoint.
+        cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
+                                              'cm_port'      :  2022,
+                                              'cm_username'  : 'admin',
+                                              'cm_password'  : 'admin'})
+        nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+
+    def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file):
+        logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file)
+        trans_id = upload_descriptor(logger, tg_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        # The catalog must now contain exactly the VNFD just onboarded.
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should be one vnfds"
+        assert "trafgen_vnfd" in [vnfds[0].name]
+
+    def test_onboard_vrouter_vnfd(self, logger, vnfd_proxy, vrouter_vnfd_package_file):
+        logger.info("Onboarding vrouter_vnfd package: %s", vrouter_vnfd_package_file)
+        trans_id = upload_descriptor(logger, vrouter_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_ts_vnfd(self, logger, vnfd_proxy, ts_vnfd_package_file):
+        logger.info("Onboarding trafsink_vnfd package: %s", ts_vnfd_package_file)
+        trans_id = upload_descriptor(logger, ts_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 3, "There should be three vnfds"
+        assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name]
+
+    def test_onboard_tg_2vrouter_ts_nsd(self, logger, nsd_proxy, tg_2vrouter_ts_nsd_package_file):
+        logger.info("Onboarding tg_2vrouter_ts nsd package: %s", tg_2vrouter_ts_nsd_package_file)
+        trans_id = upload_descriptor(logger, tg_2vrouter_ts_nsd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        # NOTE(review): name asserts "tg_vrouter_ts_nsd" (no "2") while
+        # short_name asserts "tg_2vrouter_ts_nsd" -- confirm against the
+        # actual descriptor; one of these looks inconsistent.
+        assert nsd.name == "tg_vrouter_ts_nsd"
+        assert nsd.short_name == "tg_2vrouter_ts_nsd"
+
+    def test_instantiate_tg_2vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        nsr = create_nsr_from_nsd_id(nsd.id)
+        # NOTE(review): the ping/pong variant of this test pushes the NSR
+        # through rwnsr_proxy instead of nsr_proxy -- confirm which proxy
+        # this launchpad build expects for /ns-instance-config.
+        nsr_proxy.merge_config('/ns-instance-config', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+
diff --git a/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py
new file mode 100644
index 0000000..ed00a25
--- /dev/null
+++ b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py
@@ -0,0 +1,325 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_tg_2vrouter_ts_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test ExtVNF
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+
+import gi
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+
+
+
+from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+RW_VROUTER_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/vrouter"
+    )
+RW_TRAFGEN_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/trafgen"
+    )
+RW_TRAFSINK_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/trafsink"
+    )
+RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/nsds/tg_2vrouter_ts"
+    )
+
+
+class PackageError(Exception):
+    pass
+
+
+def raise_package_error():
+    raise PackageError("Could not find ns packages")
+
+
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwResourceMgrYang)
+
+
+@pytest.fixture(scope='module')
+def cloud_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+@pytest.fixture(scope='module')
+def so_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+@pytest.fixture(scope='module')
+def nsm_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsmYang)
+
+@pytest.fixture(scope='session')
+def vrouter_vnfd_package_file():
+    vrouter_pkg_file = os.path.join(
+            RW_VROUTER_PKG_INSTALL_DIR,
+            "vrouter_vnfd.tar.gz",
+            )
+    if not os.path.exists(vrouter_pkg_file):
+        raise_package_error()
+
+    return vrouter_pkg_file
+
+@pytest.fixture(scope='session')
+def tg_vnfd_package_file():
+    tg_pkg_file = os.path.join(
+            RW_TRAFGEN_PKG_INSTALL_DIR,
+            "trafgen_vnfd.tar.gz",
+            )
+    if not os.path.exists(tg_pkg_file):
+        raise_package_error()
+
+    return tg_pkg_file
+
+@pytest.fixture(scope='session')
+def ts_vnfd_package_file():
+    ts_pkg_file = os.path.join(
+            RW_TRAFSINK_PKG_INSTALL_DIR,
+            "trafsink_vnfd.tar.gz",
+            )
+    if not os.path.exists(ts_pkg_file):
+        raise_package_error()
+
+    return ts_pkg_file
+
+@pytest.fixture(scope='session')
+def tg_2vrouter_ts_nsd_package_file():
+    tg_2vrouter_ts_nsd_pkg_file = os.path.join(
+            RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR,
+            "tg_2vrouter_ts_nsd.tar.gz",
+            )
+    if not os.path.exists(tg_2vrouter_ts_nsd_pkg_file):
+        raise_package_error()
+
+    return tg_2vrouter_ts_nsd_pkg_file
+
+
+def create_nsr_from_nsd_id(nsd_id):
+    nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+    nsr.id = str(uuid.uuid4())
+    nsr.name = "TG-2Vrouter-TS EPA"
+    nsr.short_name = "TG-2Vrouter-TS EPA"
+    nsr.description = "4 VNFs with Trafgen, 2 Vrouters and Trafsink EPA"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete",
+             transaction_id)
+    start_time = time.time()
+    while (time.time() - start_time) < timeout_secs:
+        r = requests.get(
+                'http://{host}:4567/api/upload/{t_id}/state'.format(
+                    host=host, t_id=transaction_id
+                    )
+                )
+        state = r.json()
+        if state["status"] == "pending":
+            time.sleep(1)
+            continue
+
+        elif state["status"] == "success":
+            logger.info("Descriptor onboard was successful")
+            return
+
+        else:
+            raise DescriptorOnboardError(state)
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+@pytest.mark.incremental
+class TestLaunchpadStartStop(object):
+    def test_configure_cloud_account(self, cloud_proxy, logger):
+        cloud_account = RwCloudYang.CloudAccountConfig()
+        #cloud_account.name = "cloudsim_proxy"
+        #cloud_account.account_type = "cloudsim_proxy"
+        cloud_account.name = "riftuser1"
+        cloud_account.account_type = "openstack"
+        cloud_account.openstack.key = 'pluto'
+        cloud_account.openstack.secret = 'mypasswd'
+        cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/'
+        cloud_account.openstack.tenant = 'demo'
+        cloud_account.openstack.mgmt_network = 'private'
+
+        cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+
+    def test_configure_pools(self, resource_mgr_proxy):
+        pools = RwResourceMgrYang.ResourcePools.from_dict({
+            "pools": [{ "name": "vm_pool_a",
+                        "resource_type": "compute",
+                        "pool_type" : "dynamic"},
+                      {"name": "network_pool_a",
+                       "resource_type": "network",
+                       "pool_type" : "dynamic",}]})
+
+        resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+
+    def test_configure_resource_orchestrator(self, so_proxy):
+        cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
+                                                'ro_port'      :  2022,
+                                                'ro_username'  : 'admin',
+                                                'ro_password'  : 'admin'})
+        so_proxy.merge_config('/rw-conman:cm-config', cfg)
+
+    def test_configure_service_orchestrator(self, nsm_proxy):
+        cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
+                                              'cm_port'      :  2022,
+                                              'cm_username'  : 'admin',
+                                              'cm_password'  : 'admin'})
+        nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+
+    
+    def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file):
+        logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file)
+        trans_id = upload_descriptor(logger, tg_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should be one vnfds"
+        assert "trafgen_vnfd" in [vnfds[0].name]
+
+    def test_onboard_vrouter_vnfd(self, logger, vnfd_proxy, vrouter_vnfd_package_file):
+        logger.info("Onboarding vrouter_vnfd package: %s", vrouter_vnfd_package_file)
+        trans_id = upload_descriptor(logger, vrouter_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_ts_vnfd(self, logger, vnfd_proxy, ts_vnfd_package_file):
+        logger.info("Onboarding trafsink_vnfd package: %s", ts_vnfd_package_file)
+        trans_id = upload_descriptor(logger, ts_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 3, "There should be three vnfds"
+        assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name]
+
+    def test_onboard_tg_2vrouter_ts_nsd(self, logger, nsd_proxy, tg_2vrouter_ts_nsd_package_file):
+        logger.info("Onboarding tg_2vrouter_ts nsd package: %s", tg_2vrouter_ts_nsd_package_file)
+        trans_id = upload_descriptor(logger, tg_2vrouter_ts_nsd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "tg_vrouter_ts_nsd"
+        assert nsd.short_name == "tg_2vrouter_ts_nsd"
+
+    def test_instantiate_tg_2vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        nsr = create_nsr_from_nsd_id(nsd.id)
+        nsr_proxy.merge_config('/ns-instance-config', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+
diff --git a/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py b/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py
new file mode 100644
index 0000000..4d6e345
--- /dev/null
+++ b/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py
@@ -0,0 +1,323 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_tg_vrouter_ts_epa_sriov_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test ExtVNF
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+
+import gi
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+
+
+from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+RW_VROUTER_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/vrouter"
+    )
+RW_TRAFGEN_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/trafgen"
+    )
+RW_TRAFSINK_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/trafsink"
+    )
+RW_TG_VROUTER_TS_NSD_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/nsds/tg_vrouter_ts"
+    )
+
+
+class PackageError(Exception):
+    pass
+
+
+def raise_package_error():
+    raise PackageError("Could not find ns packages")
+
+
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwResourceMgrYang)
+
+
+@pytest.fixture(scope='module')
+def cloud_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+@pytest.fixture(scope='module')
+def so_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+@pytest.fixture(scope='module')
+def nsm_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsmYang)
+
+@pytest.fixture(scope='session')
+def vrouter_vnfd_package_file():
+    vrouter_pkg_file = os.path.join(
+            RW_VROUTER_PKG_INSTALL_DIR,
+            "vrouter_vnfd_with_epa_sriov.tar.gz",
+            )
+    if not os.path.exists(vrouter_pkg_file):
+        raise_package_error()
+
+    return vrouter_pkg_file
+
+@pytest.fixture(scope='session')
+def tg_vnfd_package_file():
+    tg_pkg_file = os.path.join(
+            RW_TRAFGEN_PKG_INSTALL_DIR,
+            "trafgen_vnfd_with_epa_sriov.tar.gz",
+            )
+    if not os.path.exists(tg_pkg_file):
+        raise_package_error()
+
+    return tg_pkg_file
+
+@pytest.fixture(scope='session')
+def ts_vnfd_package_file():
+    ts_pkg_file = os.path.join(
+            RW_TRAFSINK_PKG_INSTALL_DIR,
+            "trafsink_vnfd_with_epa_sriov.tar.gz",
+            )
+    if not os.path.exists(ts_pkg_file):
+        raise_package_error()
+
+    return ts_pkg_file
+
+@pytest.fixture(scope='session')
+def tg_vrouter_ts_nsd_package_file():
+    tg_vrouter_ts_nsd_pkg_file = os.path.join(
+            RW_TG_VROUTER_TS_NSD_PKG_INSTALL_DIR,
+            "tg_vrouter_ts_nsd_with_epa_sriov.tar.gz",
+            )
+    if not os.path.exists(tg_vrouter_ts_nsd_pkg_file):
+        raise_package_error()
+
+    return tg_vrouter_ts_nsd_pkg_file
+
+
+def create_nsr_from_nsd_id(nsd_id):
+    nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+    nsr.id = str(uuid.uuid4())
+    nsr.name = "TG-Vrouter-TS-EPA-SRIOV"
+    nsr.short_name = "TG-Vrouter-TS-EPA-SRIOV"
+    nsr.description = "3 VNFs with Trafgen, Vrouter and Trafsink EPA SRIOV"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete",
+             transaction_id)
+    start_time = time.time()
+    while (time.time() - start_time) < timeout_secs:
+        r = requests.get(
+                'http://{host}:4567/api/upload/{t_id}/state'.format(
+                    host=host, t_id=transaction_id
+                    )
+                )
+        state = r.json()
+        if state["status"] == "pending":
+            time.sleep(1)
+            continue
+
+        elif state["status"] == "success":
+            logger.info("Descriptor onboard was successful")
+            return
+
+        else:
+            raise DescriptorOnboardError(state)
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+@pytest.mark.incremental
+class TestLaunchpadStartStop(object):
+    def test_configure_cloud_account(self, cloud_proxy, logger):
+        cloud_account = RwCloudYang.CloudAccountConfig()
+        #cloud_account.name = "cloudsim_proxy"
+        #cloud_account.account_type = "cloudsim_proxy"
+        cloud_account.name = "riftuser1"
+        cloud_account.account_type = "openstack"
+        cloud_account.openstack.key = 'pluto'
+        cloud_account.openstack.secret = 'mypasswd'
+        cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/'
+        cloud_account.openstack.tenant = 'demo'
+        cloud_account.openstack.mgmt_network = 'private'
+
+        cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+
+    def test_configure_pools(self, resource_mgr_proxy):
+        pools = RwResourceMgrYang.ResourcePools.from_dict({
+            "pools": [{ "name": "vm_pool_a",
+                        "resource_type": "compute",
+                        "pool_type" : "dynamic"},
+                      {"name": "network_pool_a",
+                       "resource_type": "network",
+                       "pool_type" : "dynamic",}]})
+
+        resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+
+    def test_configure_resource_orchestrator(self, so_proxy):
+        cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
+                                                'ro_port'      :  2022,
+                                                'ro_username'  : 'admin',
+                                                'ro_password'  : 'admin'})
+        so_proxy.merge_config('/rw-conman:cm-config', cfg)
+
+    def test_configure_service_orchestrator(self, nsm_proxy):
+        cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
+                                              'cm_port'      :  2022,
+                                              'cm_username'  : 'admin',
+                                              'cm_password'  : 'admin'})
+        nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+
+    
+    def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file):
+        logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file)
+        trans_id = upload_descriptor(logger, tg_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should be one vnfds"
+        assert "trafgen_vnfd" in [vnfds[0].name]
+
+    def test_onboard_vrouter_vnfd(self, logger, vnfd_proxy, vrouter_vnfd_package_file):
+        logger.info("Onboarding vrouter_vnfd package: %s", vrouter_vnfd_package_file)
+        trans_id = upload_descriptor(logger, vrouter_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_ts_vnfd(self, logger, vnfd_proxy, ts_vnfd_package_file):
+        logger.info("Onboarding trafsink_vnfd package: %s", ts_vnfd_package_file)
+        trans_id = upload_descriptor(logger, ts_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 3, "There should be three vnfds"
+        assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name]
+
+    def test_onboard_tg_vrouter_ts_nsd(self, logger, nsd_proxy, tg_vrouter_ts_nsd_package_file):
+        logger.info("Onboarding tg_vrouter_ts nsd package: %s", tg_vrouter_ts_nsd_package_file)
+        trans_id = upload_descriptor(logger, tg_vrouter_ts_nsd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "tg_vrouter_ts_nsd"
+
+    def test_instantiate_tg_vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        nsr = create_nsr_from_nsd_id(nsd.id)
+        nsr_proxy.merge_config('/ns-instance-config', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+
diff --git a/rwlaunchpad/test/racfg/lprecovery_test.racfg b/rwlaunchpad/test/racfg/lprecovery_test.racfg
new file mode 100644
index 0000000..43e07aa
--- /dev/null
+++ b/rwlaunchpad/test/racfg/lprecovery_test.racfg
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_LPRECOVERY_TEST",
+  "commandline":"./launchpad_recovery",
+  "target_vm":"VM",
+  "test_description":"Test targeting launchpad recovery feature",
+  "run_as_root": true,
+  "status":"broken",
+  "keywords":["nightly","smoke"],
+  "timelimit": 4800,
+  "networks":[],
+  "vms":[
+    {
+      "name": "VM",
+      "memory": 8192,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/test/tosca_ut.py b/rwlaunchpad/test/tosca_ut.py
new file mode 100755
index 0000000..40efe41
--- /dev/null
+++ b/rwlaunchpad/test/tosca_ut.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
+
+import argparse
+import logging
+import os
+import shutil
+import sys
+import tarfile
+import tempfile
+import unittest
+import xmlrunner
+
+import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
+
+from rift.mano.utils.compare_desc import CompareDescShell
+
+from rift.tasklets.rwlaunchpad.tosca import ExportTosca
+from rift.tasklets.rwlaunchpad.tosca import ImportTosca
+
+from rift.package.package import TarPackageArchive
+
+class PingPongDescriptors(object):
+    def __init__(self):
+        ping_vnfd, pong_vnfd, nsd = \
+                ping_pong_nsd.generate_ping_pong_descriptors(
+                    pingcount=1,
+                    external_vlr_count=1,
+                    internal_vlr_count=0,
+                    num_vnf_vms=1,
+                    ping_md5sum='1234567890abcdefg',
+                    pong_md5sum='1234567890abcdefg',
+                    mano_ut=False,
+                    use_scale_group=True,
+                    use_mon_params=True,
+                    use_placement_group=False,
+                    use_ns_init_conf=False,
+                )
+        self.ping_pong_nsd = nsd.descriptor.nsd[0]
+        self.ping_vnfd = ping_vnfd.descriptor.vnfd[0]
+        self.pong_vnfd = pong_vnfd.descriptor.vnfd[0]
+
+
+class ToscaTestCase(unittest.TestCase):
+    """ Unittest for YANG to TOSCA and back translations
+
+    This generates the Ping Pong descriptors using the script
+    in examples and then converts it to TOSCA and back to YANG.
+    """
+    default_timeout = 0
+    top_dir = __file__[:__file__.find('/modules/core/')]
+    log_level = logging.WARN
+    log = None
+
+    @classmethod
+    def setUpClass(cls):
+        fmt = logging.Formatter(
+                '%(asctime)-23s %(levelname)-5s  (%(name)s@%(process)d:%(filename)s:%(lineno)d) - %(message)s')
+        stderr_handler = logging.StreamHandler(stream=sys.stderr)
+        stderr_handler.setFormatter(fmt)
+        logging.basicConfig(level=cls.log_level)
+        cls.log = logging.getLogger('tosca-ut')
+        cls.log.addHandler(stderr_handler)
+
+    def setUp(self):
+        """Run before each test method to initialize test environment."""
+
+        super(ToscaTestCase, self).setUp()
+        self.output_dir = tempfile.mkdtemp()
+
+    def compare_dict(self, gen_d, exp_d):
+        gen = "--generated="+str(gen_d)
+        exp = "--expected="+str(exp_d)
+        CompareDescShell.compare_dicts(gen, exp, log=self.log)
+
+    def yang_to_tosca(self, descs):
+        """Convert YANG model to TOSCA model"""
+        pkg = ExportTosca(self.log)
+        nsd_id = pkg.add_nsd(descs.ping_pong_nsd)
+        pkg.add_vnfd(nsd_id, descs.ping_vnfd)
+        pkg.add_vnfd(nsd_id, descs.pong_vnfd)
+
+        return pkg.create_archive('ping_pong_nsd', self.output_dir)
+
+    def tosca_to_yang(self, tosca_file):
+        """Convert TOSCA model to YANG model"""
+        if ImportTosca.is_tosca_package(tosca_file):
+            # This could be a tosca package, try processing
+            tosca = ImportTosca(self.log, tosca_file, out_dir=self.output_dir)
+            files = tosca.translate()
+            if files is None or len(files) < 3:
+                raise ValueError("Could not process as a "
+                                 "TOSCA package {}: {}".format(tosca_file, files))
+            else:
+                 self.log.info("Tosca package was translated successfully")
+                 return files
+        else:
+            raise ValueError("Not a valid TOSCA archive: {}".
+                             format(tosca_file))
+
+    def compare_descs(self, descs, yang_files):
+        """Compare the descriptors generated with the original"""
+        for yang_file in yang_files:
+            if tarfile.is_tarfile(yang_file):
+                with open(yang_file, "r+b") as tar:
+                    archive = TarPackageArchive(self.log, tar)
+                    pkg = archive.create_package()
+                    desc_type = pkg.descriptor_type
+                    if desc_type == 'nsd':
+                        nsd_yang = pkg.descriptor_msg.as_dict()
+                        self.compare_dict(nsd_yang,
+                                          descs.ping_pong_nsd.as_dict())
+                    elif desc_type == 'vnfd':
+                        vnfd_yang = pkg.descriptor_msg.as_dict()
+                        if 'ping_vnfd' == vnfd_yang['name']:
+                            self.compare_dict(vnfd_yang,
+                                              descs.ping_vnfd.as_dict())
+                        elif 'pong_vnfd' == vnfd_yang['name']:
+                            self.compare_dict(vnfd_yang,
+                                              descs.pong_vnfd.as_dict())
+                        else:
+                            raise Exception("Unknown descriptor type {} found: {}".
+                                            format(desc_type, pkg.files))
+            else:
+                raise Exception("Did not find a valid tar file for yang model: {}".
+                                format(yang_file))
+
+    def test_output(self):
+        try:
+            # Generate the Ping Pong descriptors
+            descs = PingPongDescriptors()
+
+            # Translate the descriptors to TOSCA
+            tosca_file = self.yang_to_tosca(descs)
+
+            # Now translate back to YANG
+            yang_files = self.tosca_to_yang(tosca_file)
+
+            # Compare the generated YANG to original
+            self.compare_descs(descs, yang_files)
+
+            # Removing temp dir only on success to allow debug in case of failures
+            if self.output_dir is not None:
+                shutil.rmtree(self.output_dir)
+                self.output_dir = None
+
+        except Exception as e:
+            self.log.exception(e)
+            self.fail("Exception {}".format(e))
+
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+    else:
+        runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    ToscaTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/test/utest_nsr_handler.py b/rwlaunchpad/test/utest_nsr_handler.py
new file mode 100755
index 0000000..ffab929
--- /dev/null
+++ b/rwlaunchpad/test/utest_nsr_handler.py
@@ -0,0 +1,485 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import asyncio
+import logging
+import os
+import sys
+import time
+import unittest
+import uuid
+
+import xmlrunner
+
+import gi.repository.RwDts as rwdts
+import gi.repository.RwNsmYang as rwnsmyang
+import gi.repository.NsrYang as NsrYang
+import gi.repository.RwNsrYang as RwNsrYang
+import gi.repository.RwTypes as RwTypes
+import gi.repository.ProtobufC as ProtobufC
+import gi.repository.RwResourceMgrYang as RwResourceMgrYang
+import gi.repository.RwLaunchpadYang as launchpadyang
+import rift.tasklets
+import rift.test.dts
+
+import mano_ut
+
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class NsrDtsHandler(object):
+    """ The network service DTS handler """
+    NSR_XPATH = "C,/nsr:ns-instance-config/nsr:nsr"
+    SCALE_INSTANCE_XPATH = "C,/nsr:ns-instance-config/nsr:nsr/nsr:scaling-group/nsr:instance"
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+
+        self._nsr_regh = None
+        self._scale_regh = None
+
+    @property
+    def nsm(self):
+        """ Return the NS manager instance """
+        return self._nsm
+
+    def get_scale_group_instances(self, nsr_id, group_name):
+        def nsr_id_from_keyspec(ks):
+            nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+            nsr_id = nsr_path_entry.key00.id
+            return nsr_id
+
+        def group_name_from_keyspec(ks):
+            group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+            group_name = group_path_entry.key00.scaling_group_name_ref
+            return group_name
+
+
+        xact_ids = set()
+        for instance_cfg, keyspec in self._scale_regh.get_xact_elements(include_keyspec=True):
+            elem_nsr_id = nsr_id_from_keyspec(keyspec)
+            if elem_nsr_id != nsr_id:
+                continue
+
+            elem_group_name = group_name_from_keyspec(keyspec)
+            if elem_group_name != group_name:
+                continue
+
+            xact_ids.add(instance_cfg.id)
+
+        return xact_ids
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for Nsr create/update/delete/read requests from dts """
+
+        def nsr_id_from_keyspec(ks):
+            nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+            nsr_id = nsr_path_entry.key00.id
+            return nsr_id
+
+        def group_name_from_keyspec(ks):
+            group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+            group_name = group_path_entry.key00.scaling_group_name_ref
+            return group_name
+
+        def is_instance_in_reg_elements(nsr_id, group_name, instance_id):
+            """ Return boolean indicating if scaling group instance was already commited previously.
+
+            By looking at the existing elements in this registration handle (elements not part
+            of this current xact), we can tell if the instance was configured previously without
+            keeping any application state.
+            """
+            for instance_cfg, keyspec in self._nsr_regh.get_xact_elements(include_keyspec=True):
+                elem_nsr_id = nsr_id_from_keyspec(keyspec)
+                elem_group_name = group_name_from_keyspec(keyspec)
+
+                if elem_nsr_id != nsr_id or group_name != elem_group_name:
+                    continue
+
+                if instance_cfg.id == instance_id:
+                    return True
+
+            return False
+
+        def get_scale_group_instance_delta(nsr_id, group_name, xact):
+
+            #1. Find all elements in the  transaction add to the "added"
+            #2. Find matching elements in current elements, remove from "added".
+            #3. Find elements only in current, add to "deleted"
+
+            xact_ids = set()
+            for instance_cfg, keyspec in self._scale_regh.get_xact_elements(xact, include_keyspec=True):
+                elem_nsr_id = nsr_id_from_keyspec(keyspec)
+                if elem_nsr_id != nsr_id:
+                    continue
+
+                elem_group_name = group_name_from_keyspec(keyspec)
+                if elem_group_name != group_name:
+                    continue
+
+                xact_ids.add(instance_cfg.id)
+
+            current_ids = set()
+            for instance_cfg, keyspec in self._scale_regh.get_xact_elements(include_keyspec=True):
+                elem_nsr_id = nsr_id_from_keyspec(keyspec)
+                if elem_nsr_id != nsr_id:
+                    continue
+
+                elem_group_name = group_name_from_keyspec(keyspec)
+                if elem_group_name != group_name:
+                    continue
+
+                current_ids.add(instance_cfg.id)
+
+            delta = {
+                    "added": xact_ids - current_ids,
+                    "deleted": current_ids - xact_ids
+                    }
+            return delta
+
+        def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+            # Unforunately, it is currently difficult to figure out what has exactly
+            # changed in this xact without Pbdelta support (RIFT-4916)
+            # As a workaround, we can fetch the pre and post xact elements and
+            # perform a comparison to figure out adds/deletes/updates
+            xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+            curr_cfgs = list(dts_member_reg.elements)
+
+            xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+            curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+            # Find Adds
+            added_keys = set(xact_key_map) - set(curr_key_map)
+            added_cfgs = [xact_key_map[key] for key in added_keys]
+
+            # Find Deletes
+            deleted_keys = set(curr_key_map) - set(xact_key_map)
+            deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+            # Find Updates
+            updated_keys = set(curr_key_map) & set(xact_key_map)
+            updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+            return added_cfgs, deleted_cfgs, updated_cfgs
+
+        def on_apply(dts, acg, xact, action, scratch):
+            """Apply the  configuration"""
+            def handle_create_nsr(msg):
+                # Handle create nsr requests """
+                # Do some validations
+                if not msg.has_field("nsd_ref"):
+                    err = "NSD reference not provided"
+                    self._log.error(err)
+                    raise NetworkServiceRecordError(err)
+
+                self._log.info("Creating NetworkServiceRecord %s  from nsd_id  %s",
+                               msg.id, msg.nsd_ref)
+
+                #nsr = self.nsm.create_nsr(msg)
+                return nsr
+
+            def handle_delete_nsr(msg):
+                @asyncio.coroutine
+                def delete_instantiation(ns_id):
+                    """ Delete instantiation """
+                    pass
+                    #with self._dts.transaction() as xact:
+                        #yield from self._nsm.terminate_ns(ns_id, xact)
+
+                # Handle delete NSR requests
+                self._log.info("Delete req for  NSR Id: %s received", msg.id)
+                # Terminate the NSR instance
+                #nsr = self._nsm.get_ns_by_nsr_id(msg.id)
+
+                #nsr.set_state(NetworkServiceRecordState.TERMINATE_RCVD)
+                #event_descr = "Terminate rcvd for NS Id:%s" % msg.id
+                #nsr.record_event("terminate-rcvd", event_descr)
+
+                #self._loop.create_task(delete_instantiation(msg.id))
+
+            @asyncio.coroutine
+            def begin_instantiation(nsr):
+                # Begin instantiation
+                pass
+                #self._log.info("Beginning NS instantiation: %s", nsr.id)
+                #yield from self._nsm.instantiate_ns(nsr.id, xact)
+
+            self._log.debug("Got nsr apply (xact: %s) (action: %s)(scr: %s)",
+                            xact, action, scratch)
+
+            if action == rwdts.AppconfAction.INSTALL and xact.id is None:
+                self._log.debug("No xact handle.  Skipping apply config")
+                xact = None
+
+            (added_msgs, deleted_msgs, updated_msgs) = get_add_delete_update_cfgs(self._nsr_regh, xact, "id")
+
+            for msg in added_msgs:
+                self._log.info("Create NSR received in on_apply to instantiate NS:%s", msg.id)
+                #if msg.id not in self._nsm.nsrs:
+                #    self._log.info("Create NSR received in on_apply to instantiate NS:%s", msg.id)
+                #    nsr = handle_create_nsr(msg)
+                #    self._loop.create_task(begin_instantiation(nsr))
+
+            for msg in deleted_msgs:
+                self._log.info("Delete NSR received in on_apply to terminate NS:%s", msg.id)
+                try:
+                    handle_delete_nsr(msg)
+                except Exception:
+                    self._log.exception("Failed to terminate NS:%s", msg.id)
+
+            for msg in updated_msgs:
+                self._log.info("Update NSR received in on_apply to change scaling groups in NS:%s", msg.id)
+
+                for group in msg.scaling_group:
+                    instance_delta = get_scale_group_instance_delta(msg.id, group.scaling_group_name_ref, xact)
+                    self._log.debug("Got NSR:%s scale group instance delta: %s", msg.id, instance_delta)
+
+                    #for instance_id in instance_delta["added"]:
+                    #    self._nsm.scale_nsr_out(msg.id, group.scaling_group_name_ref, instance_id, xact)
+
+                    #for instance_id in instance_delta["deleted"]:
+                    #    self._nsm.scale_nsr_in(msg.id, group.scaling_group_name_ref, instance_id)
+
+
+            return RwTypes.RwStatus.SUCCESS
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare calllback from DTS for NSR """
+
+            xpath = ks_path.to_xpath(NsrYang.get_schema())
+            action = xact_info.query_action
+            self._log.debug(
+                    "Got Nsr prepare callback (xact: %s) (action: %s) (info: %s), %s:%s)",
+                    xact, action, xact_info, xpath, msg
+                    )
+
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+                pass
+                # Ensure the Cloud account has been specified if this is an NSR create
+                #if msg.id not in self._nsm.nsrs:
+                #    if not msg.has_field("cloud_account"):
+                #        raise NsrInstantiationFailed("Cloud account not specified in NSR")
+
+                # We do not allow scaling actions to occur if the NS is not in running state
+                #elif msg.has_field("scaling_group"):
+                #    nsr = self._nsm.nsrs[msg.id]
+                #    if nsr.state != NetworkServiceRecordState.RUNNING:
+                #        raise ScalingOperationError("Unable to perform scaling action when NS is not in running state")
+
+                #    if len(msg.scaling_group) > 1:
+                #        raise ScalingOperationError("Only a single scaling group can be configured at a time")
+
+                #    for group_msg in msg.scaling_group:
+                #        num_new_group_instances = len(group_msg.instance)
+                #        if num_new_group_instances > 1:
+                #            raise ScalingOperationError("Only a single scaling instance can be created at a time")
+
+                #        elif num_new_group_instances == 1:
+                #            scale_group = nsr.scaling_groups[group_msg.scaling_group_name_ref]
+                #            if len(scale_group.instances) == scale_group.max_instance_count:
+                #                raise ScalingOperationError("Max instances for %s reached" % scale_group)
+
+
+            acg.handle.prepare_complete_ok(xact_info.handle)
+
+
+        self._log.debug("Registering for NSR config using xpath: %s",
+                        NsrDtsHandler.NSR_XPATH)
+
+        acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
+        with self._dts.appconf_group_create(handler=acg_hdl) as acg:
+            self._nsr_regh = acg.register(xpath=NsrDtsHandler.NSR_XPATH,
+                                      flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+                                      on_prepare=on_prepare)
+
+            self._scale_regh = acg.register(
+                                      xpath=NsrDtsHandler.SCALE_INSTANCE_XPATH,
+                                      flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+                                      )
+
+
+class XPaths(object):
+    @staticmethod
+    def nsr_config(nsr_id=None):
+        return ("C,/nsr:ns-instance-config/nsr:nsr" +
+                ("[nsr:id='{}']".format(nsr_id) if nsr_id is not None else ""))
+
+    def scaling_group_instance(nsr_id, group_name, instance_id):
+        return ("C,/nsr:ns-instance-config/nsr:nsr" +
+                "[nsr:id='{}']".format(nsr_id) +
+                "/nsr:scaling-group" +
+                "[nsr:scaling-group-name-ref='{}']".format(group_name) +
+                "/nsr:instance" +
+                "[nsr:id='{}']".format(instance_id)
+                )
+
+
+class NsrHandlerTestCase(rift.test.dts.AbstractDTSTest):
+    """
+    DTS GI interface unittests for NsrDtsHandler.
+
+    Creates the handler under test on one DTS api handle and drives
+    configuration transactions at it from a second ("client") handle.
+    """
+    @classmethod
+    def configure_schema(cls):
+        return NsrYang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        # Generous overall test timeout (seconds) for slow CI machines.
+        return 240
+
+    def configure_test(self, loop, test_id):
+        # Handler side: the NsrDtsHandler under test (nsm is None here).
+        self.log.debug("STARTING - %s", self.id())
+        self.tinfo = self.new_tinfo(self.id())
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+        self.handler = NsrDtsHandler(self.dts, self.log, self.loop, None)
+
+        # Client side: a second DTS api handle used to publish config.
+        self.tinfo_c = self.new_tinfo(self.id() + "_client")
+        self.dts_c = rift.tasklets.DTS(self.tinfo_c, self.schema, self.loop)
+
+    @rift.test.dts.async_test
+    def test_add_delete_ns(self):
+        # End-to-end scenario: create an NSR, add/remove scaling-group
+        # instances, then verify that creating and deleting an unrelated
+        # NSR does not disturb the first NSR's scaling-group instances.
+
+        nsr1_uuid = "nsr1_uuid" # str(uuid.uuid4())
+        nsr2_uuid = "nsr2_uuid" # str(uuid.uuid4())
+
+        assert nsr1_uuid != nsr2_uuid
+
+        yield from self.handler.register()
+        # Short sleeps throughout let the handler side observe each commit.
+        yield from asyncio.sleep(.5, loop=self.loop)
+
+        self.log.debug("Creating NSR")
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_update(
+                XPaths.nsr_config(nsr1_uuid),
+                NsrYang.YangData_Nsr_NsInstanceConfig_Nsr(id=nsr1_uuid, name="fu"),
+                flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(.5, loop=self.loop)
+
+        # Add scaling-group instance 1234 to nsr1.
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_update(
+                    XPaths.scaling_group_instance(nsr1_uuid, "group", 1234),
+                    NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=1234),
+                    flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                    )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(.5, loop=self.loop)
+
+        # Remove instance 1234 again.
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_delete(
+                    XPaths.scaling_group_instance(nsr1_uuid, "group", 1234),
+                    flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                    )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(.5, loop=self.loop)
+
+        # Create a different instance (12345) in the same group.
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_create(
+                    XPaths.scaling_group_instance(nsr1_uuid, "group", 12345),
+                    NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=12345),
+                    flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                    )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(.5, loop=self.loop)
+
+        group_ids = self.handler.get_scale_group_instances(nsr2_uuid, "group")
+        self.log.debug("Got group ids in nsr2 after adding 12345 to nsr1: %s", group_ids)
+        group_ids = self.handler.get_scale_group_instances(nsr1_uuid, "group")
+        self.log.debug("Got group ids in nsr1 after adding 12345 to nsr1: %s", group_ids)
+        assert group_ids == {12345}
+
+        self.log.debug("\n\nADD A COMPLETELY DIFFERENT NSR\n")
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_update(
+                XPaths.nsr_config(nsr2_uuid),
+                NsrYang.YangData_Nsr_NsInstanceConfig_Nsr(id=nsr2_uuid, name="fu2"),
+                flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(.5, loop=self.loop)
+ 
+        # nsr1's instances must be unaffected by the new NSR.
+        group_ids = self.handler.get_scale_group_instances(nsr2_uuid, "group")
+        self.log.debug("Got group ids in nsr2 after adding new nsr: %s", group_ids)
+        group_ids = self.handler.get_scale_group_instances(nsr1_uuid, "group")
+        self.log.debug("Got group ids in nsr1 after adding new nsr: %s", group_ids)
+        assert group_ids == {12345}
+
+        self.log.debug("\n\nDELETE A COMPLETELY DIFFERENT NSR\n")
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_delete(
+                XPaths.nsr_config(nsr2_uuid),
+                flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(.5, loop=self.loop)
+
+        # nsr1's instances must also survive the unrelated delete.
+        group_ids = self.handler.get_scale_group_instances(nsr2_uuid, "group")
+        self.log.debug("Got group ids in nsr2 after deleting nsr2: %s", group_ids)
+        group_ids = self.handler.get_scale_group_instances(nsr1_uuid, "group")
+        self.log.debug("Got group ids in nsr1 after deleting nsr2: %s", group_ids)
+        assert group_ids == {12345}
+
+        # Final cleanup: remove the remaining scaling-group instance.
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_delete(
+                    XPaths.scaling_group_instance(nsr1_uuid, "group", 12345),
+                    flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                    )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(2, loop=self.loop)
+
+def main():
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    NsrHandlerTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/test/utest_ro_account.py b/rwlaunchpad/test/utest_ro_account.py
new file mode 100644
index 0000000..6e480d4
--- /dev/null
+++ b/rwlaunchpad/test/utest_ro_account.py
@@ -0,0 +1,153 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import sys
+import types
+import unittest
+import uuid
+
+import rift.test.dts
+import rift.tasklets.rwnsmtasklet.cloud as cloud
+import rift.tasklets.rwnsmtasklet.openmano_nsm as openmano_nsm
+import rw_peas
+
+import gi
+gi.require_version('RwDtsYang', '1.0')
+from gi.repository import (
+        RwLaunchpadYang as launchpadyang,
+        RwDts as rwdts,
+        RwVnfdYang,
+        RwVnfrYang,
+        RwNsrYang,
+        RwNsdYang,
+        VnfrYang
+        )
+
+
+class DescriptorPublisher(object):
+    """Test helper that publishes descriptor objects onto the DTS bus.
+
+    Every registration handle is retained so unpublish_all() can later
+    deregister everything this helper published.
+    """
+
+    def __init__(self, log, dts, loop):
+        self.log = log
+        self.loop = loop
+        self.dts = dts
+
+        # Registration handles created by publish(), kept for teardown.
+        self._registrations = []
+
+    @asyncio.coroutine
+    def publish(self, w_path, path, desc):
+        """Register as a publisher on w_path and publish desc at path.
+
+        Waits (via an asyncio.Event) until the registration's on_ready
+        callback has created the element, then returns the registration
+        handle (which is also kept internally for unpublish_all()).
+        """
+        ready_event = asyncio.Event(loop=self.loop)
+
+        @asyncio.coroutine
+        def on_ready(regh, status):
+            # Called by DTS once the registration is live; publish the
+            # element inside a transaction, then release the waiter.
+            self.log.debug("Create element: %s, obj-type:%s obj:%s",
+                           path, type(desc), desc)
+            with self.dts.transaction() as xact:
+                regh.create_element(path, desc, xact.xact)
+            self.log.debug("Created element: %s, obj:%s", path, desc)
+            ready_event.set()
+
+        handler = rift.tasklets.DTS.RegistrationHandler(
+                on_ready=on_ready
+                )
+
+        self.log.debug("Registering path: %s, obj:%s", w_path, desc)
+        reg = yield from self.dts.register(
+                w_path,
+                handler,
+                flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
+                )
+        self._registrations.append(reg)
+        self.log.debug("Registered path : %s", w_path)
+        yield from ready_event.wait()
+
+        return reg
+
+    def unpublish_all(self):
+        """Deregister every registration created by publish()."""
+        self.log.debug("Deregistering all published descriptors")
+        for reg in self._registrations:
+            reg.deregister()
+
+class RoAccountDtsTestCase(rift.test.dts.AbstractDTSTest):
+    @classmethod
+    def configure_schema(cls):
+       return launchpadyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", test_id)
+        self.tinfo = self.new_tinfo(str(test_id))
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+
+        self.tinfo_sub = self.new_tinfo(str(test_id) + "_sub")
+        self.dts_sub = rift.tasklets.DTS(self.tinfo_sub, self.schema, self.loop)
+
+        self.publisher = DescriptorPublisher(self.log, self.dts, self.loop)
+
+    def tearDown(self):
+        super().tearDown()
+
+    @rift.test.dts.async_test
+    def test_orch_account_create(self):
+        orch = cloud.ROAccountPluginSelector(self.dts, self.log, self.loop, None)
+
+        yield from orch.register()
+
+        # Test if we have a default plugin in case no RO is specified.
+        assert type(orch.ro_plugin) is cloud.RwNsPlugin
+        mock_orch_acc = launchpadyang.ResourceOrchestrator.from_dict(
+                {'name': 'rift-ro', 'account_type': 'rift_ro', 'rift_ro': {'rift_ro': True}})
+
+        # Test rift-ro plugin
+        w_xpath = "C,/rw-launchpad:resource-orchestrator"
+        xpath = w_xpath
+        yield from self.publisher.publish(w_xpath, xpath, mock_orch_acc)
+        yield from asyncio.sleep(5, loop=self.loop)
+
+        assert type(orch.ro_plugin) is cloud.RwNsPlugin
+
+        # Test Openmano plugin
+        mock_orch_acc = launchpadyang.ResourceOrchestrator.from_dict(
+                {'name': 'openmano',
+                 'account_type': 'openmano',
+                 'openmano': {'tenant_id': "abc"}})
+        yield from self.publisher.publish(w_xpath, xpath, mock_orch_acc)
+        yield from asyncio.sleep(5, loop=self.loop)
+
+        print (type(orch.ro_plugin))
+        assert type(orch.ro_plugin) is openmano_nsm.OpenmanoNsPlugin
+
+        # Test delete
+        yield from self.dts.query_delete("C,/rw-launchpad:resource-orchestrator",
+                flags=rwdts.XactFlag.ADVISE)
+        assert orch.ro_plugin == None
+
+
+def main(argv=sys.argv[1:]):
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(
+            argv=[__file__] + argv,
+            testRunner=None#xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+            )
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/rwlaunchpad/test/utest_rwmonitor.py b/rwlaunchpad/test/utest_rwmonitor.py
new file mode 100755
index 0000000..46c33b3
--- /dev/null
+++ b/rwlaunchpad/test/utest_rwmonitor.py
@@ -0,0 +1,873 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import asyncio
+import concurrent.futures
+import logging
+import os
+import sys
+import time
+import unittest
+import uuid
+import xmlrunner
+
+import gi
+gi.require_version('NsrYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwmonYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwMon', '1.0')
+
+from gi.repository import (
+        NsrYang,
+        RwTypes,
+        RwVnfrYang,
+        RwcalYang,
+        RwmonYang,
+        VnfrYang,
+        )
+
+from rift.tasklets.rwmonitor.core import (
+        AccountAlreadyRegisteredError,
+        AccountInUseError,
+        InstanceConfiguration,
+        Monitor,
+        NfviInterface,
+        NfviMetrics,
+        NfviMetricsCache,
+        NfviMetricsPluginManager,
+        PluginFactory,
+        PluginNotSupportedError,
+        PluginUnavailableError,
+        UnknownAccountError,
+        )
+import rw_peas
+
+
+class wait_for_pending_tasks(object):
+    """
+    This class defines a decorator that can be used to ensure that any asyncio
+    tasks created as a side-effect of coroutine are allowed to come to
+    completion.
+
+    Usage: @wait_for_pending_tasks(loop) above an @asyncio.coroutine; the
+    wrapped coroutine's result is returned unchanged after any tasks it
+    spawned have been waited on (up to `timeout` seconds).
+    """
+
+    def __init__(self, loop, timeout=1):
+        # loop    - event loop whose tasks are inspected
+        # timeout - max seconds to wait for the side-effect tasks
+        self.loop = loop
+        self.timeout = timeout
+
+    def __call__(self, coro):
+        @asyncio.coroutine
+        def impl():
+            # Snapshot the tasks pending before the coroutine runs, so only
+            # tasks it created (the set difference) are waited on afterwards.
+            original = self.pending_tasks()
+            result = yield from coro()
+
+            current = self.pending_tasks()
+            remaining = current - original
+
+            if remaining:
+                # Best-effort wait; tasks still pending after `timeout`
+                # seconds are left running.
+                yield from asyncio.wait(
+                        remaining,
+                        timeout=self.timeout,
+                        loop=self.loop,
+                        )
+
+            return result
+
+        return impl
+
+    def pending_tasks(self):
+        # All not-yet-finished tasks currently known to the loop.
+        return {t for t in asyncio.Task.all_tasks(loop=self.loop) if not t.done()}
+
+
+class MockTasklet(object):
+    def __init__(self, dts, log, loop, records):
+        self.dts = dts
+        self.log = log
+        self.loop = loop
+        self.records = records
+        self.polling_period = 0
+        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=16)
+
+
+def make_nsr(ns_instance_config_ref=str(uuid.uuid4())):
+    nsr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr()
+    nsr.ns_instance_config_ref = ns_instance_config_ref
+    return nsr
+
+def make_vnfr(id=str(uuid.uuid4())):
+    vnfr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+    vnfr.id = id
+    return vnfr
+
+def make_vdur(id=str(uuid.uuid4()), vim_id=str(uuid.uuid4())):
+    vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+    vdur.id = id
+    vdur.vim_id = vim_id
+    return vdur
+
+
+class TestNfviMetricsCache(unittest.TestCase):
+    """Unit tests for NfviMetricsCache entry lifecycle, retrieval and id maps."""
+
+    class Plugin(object):
+        # Fake monitoring plugin: always claims metrics are available and
+        # returns a fixed 0.5 vcpu utilization sample.
+        def nfvi_metrics_available(self, cloud_account):
+            return True
+
+        def nfvi_metrics(self, account, vim_id):
+            metrics = RwmonYang.NfviMetrics()
+            metrics.vcpu.utilization = 0.5
+            return metrics
+
+    def setUp(self):
+        # Fresh event loop per test; register a "mock" plugin backed by the
+        # fake Plugin above, and build a VDUR to cache metrics for.
+        self.loop = asyncio.new_event_loop()
+        self.logger = logging.getLogger('test-logger')
+
+        self.account = RwcalYang.CloudAccount(
+                name='test-cloud-account',
+                account_type="mock",
+                )
+
+        self.plugin_manager = NfviMetricsPluginManager(self.logger)
+        self.plugin_manager.register(self.account, "mock")
+
+        mock = self.plugin_manager.plugin(self.account.name)
+        mock.set_impl(TestNfviMetricsCache.Plugin())
+
+        self.vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+        self.vdur.id = "test-vdur-id"
+        self.vdur.vim_id = "test-vim-id"
+        self.vdur.vm_flavor.vcpu_count = 4
+        self.vdur.vm_flavor.memory_mb = 1
+        self.vdur.vm_flavor.storage_gb = 1
+
+    def test_create_destroy_entry(self):
+        # An entry appears on create_entry and disappears on destroy_entry.
+        cache = NfviMetricsCache(self.logger, self.loop, self.plugin_manager)
+        self.assertEqual(len(cache._nfvi_metrics), 0)
+
+        cache.create_entry(self.account, self.vdur)
+        self.assertEqual(len(cache._nfvi_metrics), 1)
+
+        cache.destroy_entry(self.vdur.id)
+        self.assertEqual(len(cache._nfvi_metrics), 0)
+
+    def test_retrieve(self):
+        # Shrink the sampling interval so the test completes quickly.
+        NfviMetrics.SAMPLE_INTERVAL = 1
+
+        cache = NfviMetricsCache(self.logger, self.loop, self.plugin_manager)
+        cache.create_entry(self.account, self.vdur)
+
+        @wait_for_pending_tasks(self.loop)
+        @asyncio.coroutine
+        def retrieve_metrics():
+            # First read returns the (empty) cached value and triggers an
+            # asynchronous refresh from the plugin ...
+            metrics = cache.retrieve("test-vim-id")
+            self.assertEqual(metrics.vcpu.utilization, 0.0)
+
+            yield from asyncio.sleep(NfviMetrics.SAMPLE_INTERVAL, loop=self.loop)
+
+            # ... so after one sample interval the plugin's value is visible.
+            metrics = cache.retrieve("test-vim-id")
+            self.assertEqual(metrics.vcpu.utilization, 0.5)
+
+        self.loop.run_until_complete(retrieve_metrics())
+
+    def test_id_mapping(self):
+        # create_entry installs a bidirectional vdur-id <-> vim-id mapping;
+        # destroy_entry removes it.
+        cache = NfviMetricsCache(self.logger, self.loop, self.plugin_manager)
+
+        cache.create_entry(self.account, self.vdur)
+
+        self.assertEqual(cache.to_vim_id(self.vdur.id), self.vdur.vim_id)
+        self.assertEqual(cache.to_vdur_id(self.vdur.vim_id), self.vdur.id)
+        self.assertTrue(cache.contains_vdur_id(self.vdur.id))
+        self.assertTrue(cache.contains_vim_id(self.vdur.vim_id))
+
+        cache.destroy_entry(self.vdur.id)
+
+        self.assertFalse(cache.contains_vdur_id(self.vdur.id))
+        self.assertFalse(cache.contains_vim_id(self.vdur.vim_id))
+
+
+class TestNfviMetrics(unittest.TestCase):
+    """
+    Tests for the NfviMetrics object, which caches the metrics for a single
+    VDUR and decides when a refresh from the NFVI plugin is due.
+    """
+
+    class Plugin(object):
+        # Minimal plugin stand-in: always available, always reports 0.5
+        # VCPU utilization.
+        def nfvi_metrics_available(self, cloud_account):
+            return True
+
+        def nfvi_metrics(self, account, vim_id):
+            metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+            metrics.vcpu.utilization = 0.5
+            return None, metrics
+
+    def setUp(self):
+        self.loop = asyncio.new_event_loop()
+        self.account = RwcalYang.CloudAccount(
+                name='test-cloud-account',
+                account_type="mock",
+                )
+
+        self.plugin = TestNfviMetrics.Plugin()
+        self.logger = logging.getLogger('test-logger')
+
+        # Non-zero flavor values avoid division by zero when utilization
+        # ratios are computed.
+        self.vdur = make_vdur()
+        self.vdur.vm_flavor.vcpu_count = 4
+        self.vdur.vm_flavor.memory_mb = 100
+        self.vdur.vm_flavor.storage_gb = 2
+        self.vdur.vim_id = 'test-vim-id'
+
+    def test_update(self):
+        nfvi_metrics = NfviMetrics(
+                self.logger,
+                self.loop,
+                self.account,
+                self.plugin,
+                self.vdur,
+                )
+
+        # Reduce the SAMPLE_INTERVAL so that the test does not take a long time
+        nfvi_metrics.SAMPLE_INTERVAL = 1
+
+        # The metrics have never been retrieved so they should be updated
+        self.assertTrue(nfvi_metrics.should_update())
+
+        # The metrics returned will be empty because the cached version is
+        # empty. However, this triggers an update to retrieve metrics from
+        # the plugin.
+        metrics = nfvi_metrics.retrieve()
+        self.assertEqual(metrics.vcpu.utilization, 0.0)
+
+        # An update has been triggered by the retrieve call so additional
+        # updates should not happen
+        self.assertFalse(nfvi_metrics.should_update())
+        self.assertFalse(nfvi_metrics._updating.done())
+
+        # Allow the event loop to run until the update is complete
+        @asyncio.coroutine
+        @wait_for_pending_tasks(self.loop)
+        def wait_for_update():
+            yield from asyncio.wait_for(
+                    nfvi_metrics._updating,
+                    timeout=2,
+                    loop=self.loop,
+                    )
+
+        self.loop.run_until_complete(wait_for_update())
+
+        # Check that we have a new metrics object
+        metrics = nfvi_metrics.retrieve()
+        self.assertEqual(metrics.vcpu.utilization, 0.5)
+
+        # We have just updated the metrics so it should be unnecessary to update
+        # right now
+        self.assertFalse(nfvi_metrics.should_update())
+        self.assertTrue(nfvi_metrics._updating.done())
+
+        # Wait an amount of time equal to the SAMPLE_INTERVAL. This ensures
+        # that the metrics that were just retrieved become stale...
+        time.sleep(NfviMetrics.SAMPLE_INTERVAL)
+
+        # ...now it is time to update again
+        self.assertTrue(nfvi_metrics.should_update())
+
+
+class TestNfviInterface(unittest.TestCase):
+    """
+    Tests for the NfviInterface, covering metrics availability queries and
+    alarm create/destroy round-trips through a stub plugin implementation.
+    """
+
+    class NfviPluginImpl(object):
+        # Stub plugin implementation that records created alarm IDs in a set
+        # so the tests can observe create/delete side effects.
+        def __init__(self):
+            self._alarms = set()
+
+        def nfvi_metrics(self, account, vm_id):
+            return rwmon.NfviMetrics()
+
+        def nfvi_metrics_available(self, account):
+            return True
+
+        def alarm_create(self, account, vim_id, alarm):
+            # Assign a fresh ID so the caller can later delete the alarm
+            alarm.alarm_id = str(uuid.uuid4())
+            self._alarms.add(alarm.alarm_id)
+            return RwTypes.RwStatus.SUCCESS
+
+        def alarm_delete(self, account, alarm_id):
+            self._alarms.remove(alarm_id)
+            return RwTypes.RwStatus.SUCCESS
+
+    def setUp(self):
+        self.loop = asyncio.new_event_loop()
+        self.logger = logging.getLogger('test-logger')
+
+        self.account = RwcalYang.CloudAccount(
+                name='test-cloud-account',
+                account_type="mock",
+                )
+
+        # Define the VDUR to avoid division by zero
+        self.vdur = make_vdur()
+        self.vdur.vm_flavor.vcpu_count = 4
+        self.vdur.vm_flavor.memory_mb = 100
+        self.vdur.vm_flavor.storage_gb = 2
+        self.vdur.vim_id = 'test-vim-id'
+
+        self.plugin_manager = NfviMetricsPluginManager(self.logger)
+        self.plugin_manager.register(self.account, "mock")
+
+        self.cache = NfviMetricsCache(
+                self.logger,
+                self.loop,
+                self.plugin_manager,
+                )
+
+        self.nfvi_interface = NfviInterface(
+                self.loop,
+                self.logger,
+                self.plugin_manager,
+                self.cache
+                )
+
+    def test_nfvi_metrics_available(self):
+        # The mock plugin always reports metrics as available
+        self.assertTrue(self.nfvi_interface.nfvi_metrics_available(self.account))
+
+    def test_retrieve(self):
+        # TODO: retrieval through the interface is not yet exercised here
+        pass
+
+    def test_alarm_create_and_destroy(self):
+        # Build a fully-populated alarm record to pass through the interface
+        alarm = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_Alarms()
+        alarm.name = "test-alarm"
+        alarm.description = "test-description"
+        alarm.vdur_id = "test-vdur-id"
+        alarm.metric = "CPU_UTILIZATION"
+        alarm.statistic = "MINIMUM"
+        alarm.operation = "GT"
+        alarm.value = 0.1
+        alarm.period = 10
+        alarm.evaluations = 1
+
+        plugin_impl = TestNfviInterface.NfviPluginImpl()
+        plugin = self.plugin_manager.plugin(self.account.name)
+        plugin.set_impl(plugin_impl)
+
+        self.assertEqual(len(plugin_impl._alarms), 0)
+
+        @asyncio.coroutine
+        @wait_for_pending_tasks(self.loop)
+        def wait_for_create():
+            coro = self.nfvi_interface.alarm_create(
+                    self.account,
+                    "test-vim-id",
+                    alarm,
+                    )
+            yield from asyncio.wait_for(
+                    coro,
+                    timeout=2,
+                    loop=self.loop,
+                    )
+
+        self.loop.run_until_complete(wait_for_create())
+        # The stub should have recorded exactly one alarm and assigned an ID
+        self.assertEqual(len(plugin_impl._alarms), 1)
+        self.assertTrue(alarm.alarm_id is not None)
+
+        @asyncio.coroutine
+        @wait_for_pending_tasks(self.loop)
+        def wait_for_destroy():
+            coro = self.nfvi_interface.alarm_destroy(
+                    self.account,
+                    alarm.alarm_id,
+                    )
+            yield from asyncio.wait_for(
+                    coro,
+                    timeout=2,
+                    loop=self.loop,
+                    )
+
+        self.loop.run_until_complete(wait_for_destroy())
+        # Destroying the alarm removes it from the stub's record
+        self.assertEqual(len(plugin_impl._alarms), 0)
+
+
+class TestVdurNfviMetrics(unittest.TestCase):
+    """
+    Tests retrieval of NFVI metrics for a single VDUR through an NfviMetrics
+    object backed by a mock plugin.
+    """
+
+    def setUp(self):
+        # Reduce the sample interval so that tests run quickly
+        NfviMetrics.SAMPLE_INTERVAL = 0.1
+
+        # Create a mock plugin to define the metrics retrieved. The plugin will
+        # return a VCPU utilization of 0.5.
+        # NOTE(review): unlike the sibling test plugins, nfvi_metrics() here
+        # returns a bare metrics object rather than a (status, metrics)
+        # tuple — confirm both return shapes are accepted by NfviMetrics.
+        class MockPlugin(object):
+            def __init__(self):
+                self.metrics = RwmonYang.NfviMetrics()
+
+            def nfvi_metrics(self, account, vim_id):
+                self.metrics.vcpu.utilization = 0.5
+                return self.metrics
+
+        self.loop = asyncio.get_event_loop()
+        self.logger = logging.getLogger('test-logger')
+
+        self.account = RwcalYang.CloudAccount(
+                name='test-cloud-account',
+                account_type="mock",
+                )
+
+        # Define the VDUR to avoid division by zero
+        vdur = make_vdur()
+        vdur.vm_flavor.vcpu_count = 4
+        vdur.vm_flavor.memory_mb = 100
+        vdur.vm_flavor.storage_gb = 2
+        vdur.vim_id = 'test-vim-id'
+
+        # Instantiate the mock plugin
+        self.plugin_manager = NfviMetricsPluginManager(self.logger)
+        self.plugin_manager.register(self.account, "mock")
+
+        self.plugin = self.plugin_manager.plugin(self.account.name)
+        self.plugin.set_impl(MockPlugin())
+
+        self.cache = NfviMetricsCache(
+                self.logger,
+                self.loop,
+                self.plugin_manager,
+                )
+
+        self.manager = NfviInterface(
+                self.loop,
+                self.logger,
+                self.plugin_manager,
+                self.cache,
+                )
+
+        self.metrics = NfviMetrics(
+                self.logger,
+                self.loop,
+                self.account,
+                self.plugin,
+                vdur,
+                )
+
+    def test_retrieval(self):
+        metrics_a = None
+        metrics_b = None
+
+        # Define a coroutine that can be added to the asyncio event loop
+        @asyncio.coroutine
+        def update():
+            # Output from the metrics calls with be written to these nonlocal
+            # variables
+            nonlocal metrics_a
+            nonlocal metrics_b
+
+            # This first call will return the current metrics values and
+            # schedule a request to the NFVI to retrieve metrics from the data
+            # source. All metrics will be zero at this point.
+            metrics_a = self.metrics.retrieve()
+
+            # Wait for the scheduled update to take effect
+            yield from asyncio.sleep(0.2, loop=self.loop)
+
+            # Retrieve the updated metrics
+            metrics_b = self.metrics.retrieve()
+
+        self.loop.run_until_complete(update())
+
+        # Check that the metrics returned indicate that the plugin was queried
+        # and returned the appropriate value, i.e. 0.5 utilization
+        self.assertEqual(0.0, metrics_a.vcpu.utilization)
+        self.assertEqual(0.5, metrics_b.vcpu.utilization)
+
+
+class TestNfviMetricsPluginManager(unittest.TestCase):
+    def setUp(self):
+        self.logger = logging.getLogger('test-logger')
+        self.plugins = NfviMetricsPluginManager(self.logger)
+        self.account = RwcalYang.CloudAccount(
+                name='test-cloud-account',
+                account_type="mock",
+                )
+
+    def test_mock_plugin(self):
+        # Register an account name with a mock plugin. If successful, the
+        # plugin manager should return a non-None object.
+        self.plugins.register(self.account, 'mock')
+        self.assertIsNotNone(self.plugins.plugin(self.account.name))
+
+        # Now unregister the cloud account
+        self.plugins.unregister(self.account.name)
+
+        # Trying to retrieve a plugin for a cloud account that has not been
+        # registered with the manager is expected to raise an exception.
+        with self.assertRaises(KeyError):
+            self.plugins.plugin(self.account.name)
+
+    def test_multiple_registration(self):
+        self.plugins.register(self.account, 'mock')
+
+        # Attempting to register the account with another type of plugin will
+        # also cause an exception to be raised.
+        with self.assertRaises(AccountAlreadyRegisteredError):
+            self.plugins.register(self.account, 'mock')
+
+        # Attempting to register the account with 'openstack' again with cause
+        # an exception to be raised.
+        with self.assertRaises(AccountAlreadyRegisteredError):
+            self.plugins.register(self.account, 'openstack')
+
+    def test_unsupported_plugin(self):
+        # If an attempt is made to register a cloud account with an unknown
+        # type of plugin, a PluginNotSupportedError should be raised.
+        with self.assertRaises(PluginNotSupportedError):
+            self.plugins.register(self.account, 'unsupported-plugin')
+
+    def test_anavailable_plugin(self):
+        # Create a factory that always raises PluginUnavailableError
+        class UnavailablePluginFactory(PluginFactory):
+            PLUGIN_NAME = "unavailable-plugin"
+
+            def create(self, cloud_account):
+                raise PluginUnavailableError()
+
+        # Register the factory
+        self.plugins.register_plugin_factory(UnavailablePluginFactory())
+
+        # Ensure that the correct exception propagates when the cloud account
+        # is registered.
+        with self.assertRaises(PluginUnavailableError):
+            self.plugins.register(self.account, "unavailable-plugin")
+
+
+class TestMonitor(unittest.TestCase):
+    """
+    The Monitor class is the implementation that is called by the
+    MonitorTasklet. It provides the unified interface for controlling and
+    querying the monitoring functionality.
+    """
+
+    def setUp(self):
+        # Reduce the sample interval so that tests run quickly
+        NfviMetrics.SAMPLE_INTERVAL = 0.1
+
+        self.loop = asyncio.get_event_loop()
+        self.logger = logging.getLogger('test-logger')
+        self.config = InstanceConfiguration()
+        self.monitor = Monitor(self.loop, self.logger, self.config)
+
+        self.account = RwcalYang.CloudAccount(
+                name='test-cloud-account',
+                account_type="mock",
+                )
+
+    def test_instance_config(self):
+        """
+        Configuration data for an instance is passed to the Monitor when it
+        is created. The data is passed in the InstanceConfiguration object.
+        This object is typically shared between the tasklet and the monitor,
+        and provides a way for the tasklet to update the configuration of the
+        monitor.
+        """
+        self.assertTrue(hasattr(self.monitor._config, "polling_period"))
+        self.assertTrue(hasattr(self.monitor._config, "min_cache_lifetime"))
+        self.assertTrue(hasattr(self.monitor._config, "max_polling_frequency"))
+
+    def test_monitor_cloud_accounts(self):
+        """
+        This test checks the cloud accounts are correctly added and deleted,
+        and that the correct exceptions are raised on duplicate adds or
+        deletes.
+
+        """
+        # Add the cloud account to the monitor
+        self.monitor.add_cloud_account(self.account)
+        self.assertIn(self.account.name, self.monitor._cloud_accounts)
+
+        # Add the cloud account to the monitor again
+        with self.assertRaises(AccountAlreadyRegisteredError):
+            self.monitor.add_cloud_account(self.account)
+
+        # Delete the cloud account
+        self.monitor.remove_cloud_account(self.account.name)
+        self.assertNotIn(self.account.name, self.monitor._cloud_accounts)
+
+        # Delete the cloud account again
+        with self.assertRaises(UnknownAccountError):
+            self.monitor.remove_cloud_account(self.account.name)
+
+    def test_monitor_cloud_accounts_illegal_removal(self):
+        """
+        A cloud account may not be removed while there are plugins or records
+        that are associated with it. Attempting to delete such a cloud account
+        will raise an exception.
+        """
+        # Add the cloud account to the monitor
+        self.monitor.add_cloud_account(self.account)
+
+        # Create a VNFR associated with the cloud account
+        vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+        vnfr.cloud_account = self.account.name
+        vnfr.id = 'test-vnfr-id'
+
+        # Add a VDUR to the VNFR
+        vdur = vnfr.vdur.add()
+        vdur.vim_id = 'test-vim-id-1'
+        vdur.id = 'test-vdur-id-1'
+
+        # Now add the VNFR to the monitor
+        self.monitor.add_vnfr(vnfr)
+
+        # Check that the monitor contains the VNFR, VDUR, and metrics
+        self.assertTrue(self.monitor.is_registered_vdur(vdur.id))
+        self.assertTrue(self.monitor.is_registered_vnfr(vnfr.id))
+        self.assertEqual(1, len(self.monitor.metrics))
+
+        # Deleting the cloud account now should raise an exception because the
+        # VNFR and VDUR are associated with the cloud account.
+        with self.assertRaises(AccountInUseError):
+            self.monitor.remove_cloud_account(self.account.name)
+
+        # Now remove the VNFR from the monitor
+        self.monitor.remove_vnfr(vnfr.id)
+        self.assertFalse(self.monitor.is_registered_vdur(vdur.id))
+        self.assertFalse(self.monitor.is_registered_vnfr(vnfr.id))
+        self.assertEqual(0, len(self.monitor.metrics))
+
+        # Safely delete the cloud account
+        self.monitor.remove_cloud_account(self.account.name)
+
+    def test_vdur_registration(self):
+        """
+        When a VDUR is registered with the Monitor it is registered with the
+        VdurNfviMetricsManager. Thus it is assigned a plugin that can be used
+        to retrieve the NFVI metrics associated with the VDU.
+        """
+        # Define the VDUR to be registered
+        vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+        vdur.vm_flavor.vcpu_count = 4
+        vdur.vm_flavor.memory_mb = 100
+        vdur.vm_flavor.storage_gb = 2
+        vdur.vim_id = 'test-vim-id'
+        vdur.id = 'test-vdur-id'
+
+        # Before registering the VDUR, the cloud account needs to be added to
+        # the monitor.
+        self.monitor.add_cloud_account(self.account)
+
+        # Register the VDUR with the monitor
+        self.monitor.add_vdur(self.account, vdur)
+        self.assertTrue(self.monitor.is_registered_vdur(vdur.id))
+
+        # Check that the VDUR has been added to the metrics cache
+        self.assertTrue(self.monitor.cache.contains_vdur_id(vdur.id))
+
+        # Unregister the VDUR
+        self.monitor.remove_vdur(vdur.id)
+        self.assertFalse(self.monitor.is_registered_vdur(vdur.id))
+
+        # Check that the VDUR has been removed from the metrics cache
+        self.assertFalse(self.monitor.cache.contains_vdur_id(vdur.id))
+
+    def test_vnfr_add_update_delete(self):
+        """
+        When a VNFR is added to the Monitor a record is created of the
+        relationship between the VNFR and any VDURs that it contains. Each VDUR
+        is then registered with the VdurNfviMetricsManager. A VNFR can also be
+        updated so that it contains more or fewer VDURs. Any VDURs that are
+        added to the VNFR are registered with the VdurNfviMetricsManager, and
+        any that are removed are unregistered. When a VNFR is deleted, all of
+        the VDURs contained in the VNFR are unregistered.
+        """
+        # Define the VDUR to be registered
+        vdur = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+        vdur.vim_id = 'test-vim-id-1'
+        vdur.id = 'test-vdur-id-1'
+
+        vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+        vnfr.cloud_account = self.account.name
+        vnfr.id = 'test-vnfr-id'
+
+        vnfr.vdur.append(vdur)
+
+        self.monitor.add_cloud_account(self.account)
+
+        # Add the VNFR to the monitor. This will also register VDURs contained
+        # in the VNFR with the monitor.
+        self.monitor.add_vnfr(vnfr)
+        self.assertTrue(self.monitor.is_registered_vdur('test-vdur-id-1'))
+
+        # Add another VDUR to the VNFR and update the monitor. Both VDURs
+        # should now be registered
+        vdur = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+        vdur.vim_id = 'test-vim-id-2'
+        vdur.id = 'test-vdur-id-2'
+
+        vnfr.vdur.append(vdur)
+
+        self.monitor.update_vnfr(vnfr)
+        self.assertTrue(self.monitor.is_registered_vdur('test-vdur-id-1'))
+        self.assertTrue(self.monitor.is_registered_vdur('test-vdur-id-2'))
+
+        # Delete the VNFR from the monitor. This should remove the VNFR and all
+        # of the associated VDURs from the monitor.
+        self.monitor.remove_vnfr(vnfr.id)
+        self.assertFalse(self.monitor.is_registered_vnfr('test-vnfr-id'))
+        self.assertFalse(self.monitor.is_registered_vdur('test-vdur-id-1'))
+        self.assertFalse(self.monitor.is_registered_vdur('test-vdur-id-2'))
+
+        # Metrics lookups for the removed VDURs should now fail
+        with self.assertRaises(KeyError):
+            self.monitor.retrieve_nfvi_metrics('test-vdur-id-1')
+
+        with self.assertRaises(KeyError):
+            self.monitor.retrieve_nfvi_metrics('test-vdur-id-2')
+
+    def test_complete(self):
+        """
+        This test simulates the addition of a VNFR to the Monitor (along with
+        updates), and retrieves NFVI metrics from the VDUR. The VNFR is then
+        deleted, which should result in a cleanup of all the data in the
+        Monitor.
+        """
+        # Create the VNFR
+        vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+        vnfr.cloud_account = self.account.name
+        vnfr.id = 'test-vnfr-id'
+
+        # Create 2 VDURs
+        vdur = vnfr.vdur.add()
+        vdur.id = 'test-vdur-id-1'
+        vdur.vim_id = 'test-vim-id-1'
+        vdur.vm_flavor.vcpu_count = 4
+        vdur.vm_flavor.memory_mb = 100
+        vdur.vm_flavor.storage_gb = 2
+
+        vdur = vnfr.vdur.add()
+        vdur.id = 'test-vdur-id-2'
+        vdur.vim_id = 'test-vim-id-2'
+        vdur.vm_flavor.vcpu_count = 4
+        vdur.vm_flavor.memory_mb = 100
+        vdur.vm_flavor.storage_gb = 2
+
+        # Plugin stub whose memory.used counter grows on every query, so
+        # each test step can tell whether a fresh sample was taken.
+        class MockPlugin(object):
+            def __init__(self):
+                self._metrics = dict()
+                self._metrics['test-vim-id-1'] = RwmonYang.NfviMetrics()
+                self._metrics['test-vim-id-2'] = RwmonYang.NfviMetrics()
+
+            def nfvi_metrics(self, account, vim_id):
+                metrics = self._metrics[vim_id]
+
+                if vim_id == 'test-vim-id-1':
+                    metrics.memory.used += 1000
+                else:
+                    metrics.memory.used += 2000
+
+                return metrics
+
+        class MockFactory(PluginFactory):
+            PLUGIN_NAME = "mock"
+
+            def create(self, cloud_account):
+                plugin = rw_peas.PeasPlugin("rwmon_mock", 'RwMon-1.0')
+                impl = plugin.get_interface("Monitoring")
+                impl.set_impl(MockPlugin())
+                return impl
+
+        # Modify the mock plugin factory
+        self.monitor._nfvi_plugins._factories["mock"] = MockFactory()
+
+        # Add the cloud account the monitor
+        self.monitor.add_cloud_account(self.account)
+
+        # Add the VNFR to the monitor.
+        self.monitor.add_vnfr(vnfr)
+
+        @wait_for_pending_tasks(self.loop)
+        @asyncio.coroutine
+        def call1():
+            # call #1 (time = 0.00s)
+            # The metrics for these VDURs have not been populated yet so a
+            # default metrics object (all zeros) is returned, and a request is
+            # scheduled with the data source to retrieve the metrics.
+            metrics1 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-1')
+            metrics2 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-2')
+
+            self.assertEqual(0, metrics1.memory.used)
+            self.assertEqual(0, metrics2.memory.used)
+
+        self.loop.run_until_complete(call1())
+
+        @wait_for_pending_tasks(self.loop)
+        @asyncio.coroutine
+        def call2():
+            # call #2 (wait 0.05s)
+            # The metrics have been populated with data from the data source
+            # due to the request made during call #1.
+            yield from asyncio.sleep(0.05)
+
+            metrics1 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-1')
+            metrics2 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-2')
+
+            self.assertEqual(1000, metrics1.memory.used)
+            self.assertEqual(2000, metrics2.memory.used)
+
+        self.loop.run_until_complete(call2())
+
+        @wait_for_pending_tasks(self.loop)
+        @asyncio.coroutine
+        def call3():
+            # call #3 (wait another 0.10s)
+            # This call exceeds 0.1s (the sample interval of the plugin)
+            # from when the data was retrieved. The cached metrics are
+            # immediately returned, but a request is made to the data source to
+            # refresh these metrics.
+            yield from asyncio.sleep(0.10)
+
+            metrics1 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-1')
+            metrics2 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-2')
+
+            self.assertEqual(1000, metrics1.memory.used)
+            self.assertEqual(2000, metrics2.memory.used)
+
+        self.loop.run_until_complete(call3())
+
+        @wait_for_pending_tasks(self.loop)
+        @asyncio.coroutine
+        def call4():
+            # call #4 (wait another 0.10s)
+            # The metrics retrieved differ from those in call #3 because the
+            # cached metrics have been updated.
+            yield from asyncio.sleep(0.10)
+            metrics1 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-1')
+            metrics2 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-2')
+
+            self.assertEqual(2000, metrics1.memory.used)
+            self.assertEqual(4000, metrics2.memory.used)
+
+        self.loop.run_until_complete(call4())
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+
+    args = parser.parse_args(argv)
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # Set the logger in this test to use a null handler
+    logging.getLogger('test-logger').addHandler(logging.NullHandler())
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + argv,
+            testRunner=xmlrunner.XMLTestRunner(
+                output=os.environ["RIFT_MODULE_TEST"]))
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/test/utest_rwnsm.py b/rwlaunchpad/test/utest_rwnsm.py
new file mode 100755
index 0000000..e125739
--- /dev/null
+++ b/rwlaunchpad/test/utest_rwnsm.py
@@ -0,0 +1,215 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import logging
+import os
+import sys
+import unittest
+import uuid
+import xmlrunner
+
+from gi.repository import (
+        NsdYang,
+        NsrYang,
+        )
+
+logger = logging.getLogger('test-rwnsmtasklet')
+
+import rift.tasklets.rwnsmtasklet.rwnsmtasklet as rwnsmtasklet
+import rift.tasklets.rwnsmtasklet.xpath as rwxpath
+
+class TestGiXpath(unittest.TestCase):
+    """
+    Tests for the rwxpath getxattr/setxattr helpers, which read and write
+    fields of GI (YANG-generated) objects via xpath expressions.
+    """
+
+    def setUp(self):
+        # Each test starts with a clean xpath cache
+        rwxpath.reset_cache()
+
+    def test_nsd_elements(self):
+        """
+        Test that a particular element in a list is correctly retrieved. In
+        this case, we are trying to retrieve an NSD from the NSD catalog.
+
+        """
+        # Create the initial NSD catalog
+        nsd_catalog = NsdYang.YangData_Nsd_NsdCatalog()
+
+        # Create an NSD, set its 'id', and add it to the catalog
+        nsd_id = str(uuid.uuid4())
+        nsd_catalog.nsd.append(
+                NsdYang.YangData_Nsd_NsdCatalog_Nsd(
+                    id=nsd_id,
+                    )
+                )
+
+        # Retrieve the NSD using an xpath expression keyed on its 'id'
+        xpath = '/nsd:nsd-catalog/nsd:nsd[nsd:id={}]'.format(nsd_id)
+        nsd = rwxpath.getxattr(nsd_catalog, xpath)
+
+        self.assertEqual(nsd_id, nsd.id)
+
+        # Modify the name of the NSD using an xpath expression
+        rwxpath.setxattr(nsd_catalog, xpath + "/nsd:name", "test-name")
+
+        name = rwxpath.getxattr(nsd_catalog, xpath + "/nsd:name")
+        self.assertEqual("test-name", name)
+
+    def test_nsd_scalar_fields(self):
+        """
+        Test that setxattr correctly sets the value specified by an xpath.
+
+        """
+        # Define a simple NSD
+        nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+
+        # Check that the unset fields are in fact set to None
+        self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name"))
+        self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
+
+        # Set the values of the 'name' and 'short-name' fields
+        rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name", "test-name")
+        rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name", "test-short-name")
+
+        # Check that the 'name' and 'short-name' fields are correctly set
+        self.assertEqual(nsd.name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name"))
+        self.assertEqual(nsd.short_name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
+
+
+class TestInputParameterSubstitution(unittest.TestCase):
+    """
+    Tests for InputParameterSubstitution, which applies NSR-config input
+    parameters to an NSD, honoring the NSD's allow-list of xpaths.
+    """
+
+    def setUp(self):
+        self.substitute_input_parameters = rwnsmtasklet.InputParameterSubstitution(logger)
+
+    def test_null_arguments(self):
+        """
+        If None is passed to the substitutor for either the NSD or the NSR
+        config, no exception should be raised.
+
+        """
+        nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+        nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+
+        self.substitute_input_parameters(None, None)
+        self.substitute_input_parameters(nsd, None)
+        self.substitute_input_parameters(None, nsr_config)
+
+    def test_illegal_input_parameter(self):
+        """
+        In the NSD there is a list of the parameters that are allowed to be
+        substituted by input parameters. This test checks that when an input
+        parameter is provided in the NSR config that is not in the NSD, it is
+        not applied.
+
+        """
+        # Define the original NSD
+        nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+        nsd.name = "robert"
+        nsd.short_name = "bob"
+
+        # Define which parameters may be modified (only 'name' is allowed)
+        nsd.input_parameter_xpath.append(
+                NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+                    xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
+                    label="NSD Name",
+                    )
+                )
+
+        # Define the input parameters that are intended to be modified
+        nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+        nsr_config.input_parameter.extend([
+            NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
+                value="alice",
+                ),
+            NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
+                value="alice",
+                ),
+            ])
+
+        self.substitute_input_parameters(nsd, nsr_config)
+
+        # Verify that only the parameter in the input_parameter_xpath list is
+        # modified after the input parameters have been applied.
+        self.assertEqual("alice", nsd.name)
+        self.assertEqual("bob", nsd.short_name)
+
+    def test_substitution(self):
+        """
+        Test that substitution of input parameters occurs as expected.
+
+        """
+        # Define the original NSD
+        nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+        nsd.name = "robert"
+        nsd.short_name = "bob"
+
+        # Define which parameters may be modified
+        nsd.input_parameter_xpath.extend([
+                NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+                    xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
+                    label="NSD Name",
+                    ),
+                NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+                    xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
+                    label="NSD Short Name",
+                    ),
+                ])
+
+        # Define the input parameters that are intended to be modified
+        nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+        nsr_config.input_parameter.extend([
+            NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
+                value="robert",
+                ),
+            NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
+                value="bob",
+                ),
+            ])
+
+        self.substitute_input_parameters(nsd, nsr_config)
+
+        # Verify that both the 'name' and 'short-name' fields are correctly
+        # replaced.
+        self.assertEqual("robert", nsd.name)
+        self.assertEqual("bob", nsd.short_name)
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+
+    args = parser.parse_args(argv)
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.FATAL)
+
+    # Make the test logger very quiet
+    logger.addHandler(logging.NullHandler())
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + argv,
+            testRunner=xmlrunner.XMLTestRunner(
+                output=os.environ["RIFT_MODULE_TEST"]))
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/test/utest_scaling_rpc.py b/rwlaunchpad/test/utest_scaling_rpc.py
new file mode 100644
index 0000000..b2290af
--- /dev/null
+++ b/rwlaunchpad/test/utest_scaling_rpc.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import os
+import sys
+import unittest
+import uuid
+import xmlrunner
+import argparse
+import logging
+import time
+import types
+
+import gi
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('NsrYang', '1.0')
+gi.require_version('RwlogMgmtYang', '1.0')
+
+from gi.repository import (
+    RwCloudYang as rwcloudyang,
+    RwDts as rwdts,
+    RwLaunchpadYang as launchpadyang,
+    RwNsmYang as rwnsmyang,
+    RwNsrYang as rwnsryang,
+    NsrYang as nsryang,
+    RwResourceMgrYang as rmgryang,
+    RwcalYang as rwcalyang,
+    RwConfigAgentYang as rwcfg_agent,
+    RwlogMgmtYang
+)
+
+from gi.repository.RwTypes import RwStatus
+import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
+import rift.tasklets
+import rift.test.dts
+import rw_peas
+
+
+
+
+class ManoTestCase(rift.test.dts.AbstractDTSTest):
+    """
+    DTS GI interface unittests.
+
+    Note:  Each test uses a list of asyncio.Events for staging through the
+    test.  These are required here because we bring up each coroutine
+    ("tasklet") at the same time and are not implementing any re-try
+    mechanisms.  For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries.  Such
+    event lists should not be used in production software.
+    """
+
+    @classmethod
+    def configure_suite(cls, rwmain):
+        # Launch the NSM tasklet from the plugin directory named by the
+        # NSM_DIR environment variable (defaulted by main() below when
+        # not already set by the caller).
+        nsm_dir = os.environ.get('NSM_DIR')
+
+        rwmain.add_tasklet(nsm_dir, 'rwnsmtasklet')
+
+    @classmethod
+    def configure_schema(cls):
+        # Schema under which all DTS transactions in this suite operate.
+        return rwnsmyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        # Per-test timeout in seconds; generous because tasklet startup
+        # is slow (see wait_tasklets below).
+        return 240
+
+    @staticmethod
+    def get_cal_account(account_type, account_name):
+        """
+        Build and return a cloud account config object
+        (rwcloudyang.CloudAccount) populated for the requested account type.
+
+        Arguments:
+            account_type - one of 'mock', 'openstack_static' or
+                           'openstack_dynamic'; any other value yields an
+                           account with only default fields set
+            account_name - value for the account's 'name' field
+        """
+        account = rwcloudyang.CloudAccount()
+        if account_type == 'mock':
+            account.name          = account_name
+            account.account_type  = "mock"
+            account.mock.username = "mock_user"
+        elif ((account_type == 'openstack_static') or (account_type == 'openstack_dynamic')):
+            # NOTE(review): openstack_info is not defined anywhere in this
+            # file, so this branch would raise NameError if taken — confirm
+            # it is supplied at module level before using openstack accounts.
+            account.name = account_name
+            account.account_type = 'openstack'
+            account.openstack.key = openstack_info['username']
+            account.openstack.secret       = openstack_info['password']
+            account.openstack.auth_url     = openstack_info['auth_url']
+            account.openstack.tenant       = openstack_info['project_name']
+            account.openstack.mgmt_network = openstack_info['mgmt_network']
+        return account
+
+    @asyncio.coroutine
+    def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"):
+        # Publish a cloud account into the config ("C,/") datastore so the
+        # NSM tasklet can resolve it when instantiating network services.
+        account = self.get_cal_account(cloud_type, cloud_name)
+        account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name)
+        self.log.info("Configuring cloud-account: %s", account)
+        yield from dts.query_create(account_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    account)
+
+    @asyncio.coroutine
+    def wait_tasklets(self):
+        # Crude startup barrier: give the tasklets five seconds to come up.
+        # There is no readiness signal here, so this is best-effort only.
+        yield from asyncio.sleep(5, loop=self.loop)
+
+    def configure_test(self, loop, test_id):
+        # Per-test setup: fresh tasklet info and a fresh DTS handle.
+        self.log.debug("STARTING - %s", self.id())
+        self.tinfo = self.new_tinfo(self.id())
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+
+    def test_create_nsr_record(self):
+        # Exercises the nsr:exec-scale-in RPC against the NSM tasklet using
+        # a mock cloud account; any exception raised inside the coroutine is
+        # re-raised here so the test fails visibly.
+
+        @asyncio.coroutine
+        def run_test():
+            yield from self.wait_tasklets()
+
+            cloud_type = "mock"
+            yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account")
+
+
+            # Trigger an rpc
+            rpc_ip = nsryang.YangInput_Nsr_ExecScaleIn.from_dict({
+                'nsr_id_ref': '1',
+                'instance_id': "1",
+                'scaling_group_name_ref': "foo"})
+
+            yield from self.dts.query_rpc("/nsr:exec-scale-in", 0, rpc_ip)
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+
+def main():
+    top_dir = __file__[:__file__.find('/modules/core/')]
+    build_dir = os.path.join(top_dir, '.build/modules/core/rwvx/src/core_rwvx-build')
+    launchpad_build_dir = os.path.join(top_dir, '.build/modules/core/mc/core_mc-build/rwlaunchpad')
+
+    if 'NSM_DIR' not in os.environ:
+        os.environ['NSM_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwnsm')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    ManoTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw=4