RIFT OSM R1 Initial Submission

Signed-off-by: Jeremy Mordkoff <jeremy.mordkoff@riftio.com>
diff --git a/rwlaunchpad/ra/pytest/conftest.py b/rwlaunchpad/ra/pytest/conftest.py
new file mode 100644
index 0000000..fc094fa
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/conftest.py
@@ -0,0 +1,131 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import pytest
+import os
+import subprocess
+import sys
+
+import rift.auto.log
+import rift.auto.session
+import rift.vcs.vcs
+import logging
+
+import gi
+gi.require_version('RwCloudYang', '1.0')
+
+from gi.repository import RwCloudYang
+
+@pytest.fixture(scope='session')
+def cloud_name_prefix():
+    '''fixture which returns the prefix used in cloud account names'''
+    return 'cloud'
+
+@pytest.fixture(scope='session')
+def cloud_account_name(cloud_name_prefix):
+    '''fixture which returns the name used to identify the cloud account'''
+    return '{prefix}-0'.format(prefix=cloud_name_prefix)
+
+@pytest.fixture(scope='session')
+def sdn_account_name():
+    '''fixture which returns the name used to identify the sdn account'''
+    return 'sdn-0'
+
+@pytest.fixture(scope='session')
+def sdn_account_type():
+    '''fixture which returns the account type used by the sdn account'''
+    return 'odl'
+
+@pytest.fixture(scope='session')
+def cloud_module():
+    '''Fixture containing the module which defines cloud account
+    Returns:
+        module to be used when configuring a cloud account
+    '''
+    return RwCloudYang
+
+@pytest.fixture(scope='session')
+def cloud_xpath():
+    '''Fixture containing the xpath that should be used to configure a cloud account
+    Returns:
+        xpath to be used when configure a cloud account
+    '''
+    return '/cloud/account'
+
+@pytest.fixture(scope='session')
+def cloud_accounts(cloud_module, cloud_name_prefix, cloud_host, cloud_user, cloud_tenants, cloud_type):
+    '''fixture which returns a list of CloudAccounts. One per tenant provided
+
+    Arguments:
+        cloud_module        - fixture: module defining cloud account
+        cloud_name_prefix   - fixture: name prefix used for cloud account
+        cloud_host          - fixture: cloud host address
+        cloud_user          - fixture: cloud account user key
+        cloud_tenants       - fixture: list of tenants to create cloud accounts on
+        cloud_type          - fixture: cloud account type
+
+    Returns:
+        A list of CloudAccounts
+    '''
+    accounts = []
+    for idx, cloud_tenant in enumerate(cloud_tenants):
+        cloud_account_name = "{prefix}-{idx}".format(prefix=cloud_name_prefix, idx=idx)
+
+        if cloud_type == 'lxc':
+            accounts.append(
+                    cloud_module.CloudAccount.from_dict({
+                        "name": cloud_account_name,
+                        "account_type": "cloudsim_proxy"})
+            )
+        elif cloud_type == 'openstack':
+            password = 'mypasswd'
+            auth_url = 'http://{cloud_host}:5000/v3/'.format(cloud_host=cloud_host)
+            mgmt_network = os.getenv('MGMT_NETWORK', 'private')
+            accounts.append(
+                    cloud_module.CloudAccount.from_dict({
+                        'name':  cloud_account_name,
+                        'account_type': 'openstack',
+                        'openstack': {
+                            'admin': True,
+                            'key': cloud_user,
+                            'secret': password,
+                            'auth_url': auth_url,
+                            'tenant': cloud_tenant,
+                            'mgmt_network': mgmt_network}})
+            )
+        elif cloud_type == 'mock':
+            accounts.append(
+                    cloud_module.CloudAccount.from_dict({
+                        "name": cloud_account_name,
+                        "account_type": "mock"})
+            )
+
+    return accounts
+
+
+@pytest.fixture(scope='session', autouse=True)
+def cloud_account(cloud_accounts):
+    '''fixture which returns an instance of CloudAccount
+
+    Arguments:
+        cloud_accounts - fixture: list of generated cloud accounts
+
+    Returns:
+        An instance of CloudAccount
+    '''
+    return cloud_accounts[0]
+
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py b/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py
new file mode 100644
index 0000000..a3c565b
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py
@@ -0,0 +1,139 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import gi
+import shlex
+import pytest
+import os
+import subprocess
+import tempfile
+
+from gi.repository import (
+    NsdYang,
+    NsrYang,
+    RwNsrYang,
+    RwVnfrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+@pytest.fixture(scope='session', autouse=True)
+def cloud_account_name(request):
+    '''fixture which returns the name used to identify the cloud account'''
+    return 'cloud-0'
+
+@pytest.fixture(scope='session')
+def launchpad_host(request, confd_host):
+    return confd_host
+
+@pytest.fixture(scope='session')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+@pytest.fixture(scope='session')
+def vnfr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VnfrYang)
+
+@pytest.fixture(scope='session')
+def rwvnfr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfrYang)
+
+@pytest.fixture(scope='session')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+@pytest.fixture(scope='session')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+@pytest.fixture(scope='session')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+@pytest.fixture(scope='session')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+@pytest.fixture(scope='session')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+# NOTE: a duplicate, byte-identical definition of the 'base_proxy' fixture
+# was removed here; defining the same session-scoped fixture twice merely
+# shadows the first definition and serves no purpose.
+
+@pytest.fixture(scope='session')
+def mvv_descr_dir(request):
+    """root-directory of descriptors files used for Multi-VM VNF"""
+    return os.path.join(
+        os.environ["RIFT_INSTALL"],
+        "demos/tests/multivm_vnf"
+        )
+
+@pytest.fixture(scope='session')
+def package_dir(request):
+    return tempfile.mkdtemp(prefix="mvv_")
+
+@pytest.fixture(scope='session')
+def trafgen_vnfd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    """Generate the multi-VM trafgen VNFD package and return its path."""
+    # Build the generator invocation once, as an argument list (no shell
+    # quoting concerns).  A previous revision also built a shell-string
+    # variant of the same command, split and printed it, and then discarded
+    # it — that dead code has been removed.
+    infile = os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_trafgen_vnfd.xml')
+    pkg_file = os.path.join(package_dir, 'multivm_trafgen_vnfd.tar.gz')
+    command = [package_gen_script,
+               "--descriptor-type", "vnfd",
+               "--format", "xml",
+               "--infile", infile,
+               "--outdir", package_dir]
+    print("Running the command arguments: %s" % command)
+    subprocess.check_call(command)
+    return pkg_file
+
+@pytest.fixture(scope='session')
+def trafsink_vnfd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    """Generate the multi-VM trafsink VNFD package and return its path."""
+    # Build the generator invocation once, as an argument list (no shell
+    # quoting concerns).  A previous revision also built a shell-string
+    # variant of the same command, split and printed it, and then discarded
+    # it — that dead code has been removed.
+    infile = os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_trafsink_vnfd.xml')
+    pkg_file = os.path.join(package_dir, 'multivm_trafsink_vnfd.tar.gz')
+    command = [package_gen_script,
+               "--descriptor-type", "vnfd",
+               "--format", "xml",
+               "--infile", infile,
+               "--outdir", package_dir]
+    print("Running the command arguments: %s" % command)
+    subprocess.check_call(command)
+    return pkg_file
+
+@pytest.fixture(scope='session')
+def slb_vnfd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    pkg_cmd = "{pkg_scr} --outdir {outdir} --infile {infile} --descriptor-type vnfd --format xml".format(
+            pkg_scr=package_gen_script,
+            outdir=package_dir,
+            infile=os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_slb_vnfd.xml'),
+            )
+    pkg_file = os.path.join(package_dir, 'multivm_slb_vnfd.tar.gz')
+    subprocess.check_call(shlex.split(pkg_cmd))
+    return pkg_file
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py
new file mode 100755
index 0000000..557518b
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py
@@ -0,0 +1,286 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_multi_vm_vnf_slb.py
+@author Karun Ganesharatnam (karun.ganesharatnam@riftio.com)
+@date 03/16/2016
+@brief Scriptable load-balancer test with multi-vm VNFs
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import shutil
+import subprocess
+import time
+import uuid
+
+from gi.repository import (
+    NsdYang,
+    NsrYang,
+    RwNsrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+import rift.auto.mano
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.fixture(scope='module')
+def multi_vm_vnf_nsd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    pkg_cmd = "{pkg_scr} --outdir {outdir} --infile {infile} --descriptor-type nsd --format xml".format(
+            pkg_scr=package_gen_script,
+            outdir=package_dir,
+            infile=os.path.join(mvv_descr_dir, 'nsd/xml/multivm_tg_slb_ts_config_nsd.xml'),
+            )
+    pkg_file = os.path.join(package_dir, 'multivm_tg_slb_ts_config_nsd.tar.gz')
+    logger.debug("Generating NSD package: %s", pkg_file)
+    subprocess.check_call(shlex.split(pkg_cmd))
+    return pkg_file
+
+def create_nsr(nsd_id, input_param_list, cloud_account_name):
+    """
+    Create the NSR record object
+
+    Arguments:
+         nsd_id             -  NSD id
+         input_param_list - list of input-parameter objects
+
+    Return:
+         NSR object
+    """
+    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+
+    nsr.id = str(uuid.uuid4())
+    nsr.name = rift.auto.mano.resource_name(nsr.id)
+    nsr.short_name = "nsr_short_name"
+    nsr.description = "This is a description"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+    nsr.input_parameter.extend(input_param_list)
+    nsr.cloud_account = cloud_account_name
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl --insecure -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
+    def check_status_onboard_status():
+        uri = 'http://%s:4567/api/upload/%s/state' % (host, transaction_id)
+        curl_cmd = 'curl --insecure {uri}'.format(
+                uri=uri
+                )
+        return subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    elapsed = 0
+    start = time.time()
+    while elapsed < timeout:
+        reply = check_status_onboard_status()
+        state = json.loads(reply)
+        if state["status"] == "success":
+            break
+
+        if state["status"] != "pending":
+            raise DescriptorOnboardError(state)
+
+        time.sleep(1)
+        elapsed = time.time() - start
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+    logger.info("Descriptor onboard was successful")
+
+
+@pytest.mark.setup('multivmvnf')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultiVmVnfSlb(object):
+    pkg_dir = None
+    @classmethod
+    def teardown_class(cls):
+        """ remove the temporary directory contains the descriptor packages
+        """
+        logger.debug("Removing the temporary package directory: %s", cls.pkg_dir)
+#         if not cls.pkg_dir is None:
+#            shutil.rmtree(cls.pkg_dir)
+
+    def test_onboard_trafgen_vnfd(self, logger, launchpad_host, vnfd_proxy, trafgen_vnfd_package_file):
+        TestMultiVmVnfSlb.pkg_dir = os.path.dirname(trafgen_vnfd_package_file)
+        logger.info("Onboarding trafgen vnfd package: %s", trafgen_vnfd_package_file)
+        trans_id = upload_descriptor(logger, trafgen_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should only be a single vnfd"
+        vnfd = vnfds[0]
+        assert vnfd.name == "multivm_trafgen_vnfd"
+
+    def test_onboard_trafsink_vnfd(self, logger, launchpad_host, vnfd_proxy, trafsink_vnfd_package_file):
+        TestMultiVmVnfSlb.pkg_dir = os.path.dirname(trafsink_vnfd_package_file)
+        logger.info("Onboarding trafsink vnfd package: %s", trafsink_vnfd_package_file)
+        trans_id = upload_descriptor(logger, trafsink_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "multivm_trafsink_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_slb_vnfd(self, logger, launchpad_host, vnfd_proxy, slb_vnfd_package_file):
+        TestMultiVmVnfSlb.pkg_dir = os.path.dirname(slb_vnfd_package_file)
+        logger.info("Onboarding slb vnfd package: %s", slb_vnfd_package_file)
+        trans_id = upload_descriptor(logger, slb_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 3, "There should be three vnfds"
+        assert "multivm_slb_vnfd" in [vnfd.name for vnfd in vnfds]
+
+    def test_onboard_multi_vm_vnf_nsd(self, logger, launchpad_host, nsd_proxy, multi_vm_vnf_nsd_package_file):
+        logger.info("Onboarding tg_slb_ts nsd package: %s", multi_vm_vnf_nsd_package_file)
+        trans_id = upload_descriptor(logger, multi_vm_vnf_nsd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "multivm_tg_slb_ts_config_nsd"
+
+    def test_instantiate_multi_vm_vnf_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy, cloud_account_name):
+
+        def verify_input_parameters(running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            # Fix: compare the running config's own entries (the loop previously read the enclosing test's 'input_param').
+            for run_input_param in running_config.input_parameter:
+                if (run_input_param.xpath == config_param.xpath and
+                    run_input_param.value == config_param.value):
+                    return True
+            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
+                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                           config_param.value,
+                                                                           running_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        input_parameters = []
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+        descr_value = "New NSD Description"
+        in_param_id = str(uuid.uuid4())
+
+        input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd.id, input_parameters, cloud_account_name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.id)
+        rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=360)
+
+
+@pytest.mark.teardown('multivmvnf')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultiVmVnfSlbTeardown(object):
+    def test_terminate_nsr(self, nsr_proxy, vnfr_proxy, rwnsr_proxy, logger):
+        """
+        Terminate the instance and check if the record is deleted.
+
+        Asserts:
+        1. NSR record is deleted from instance-config.
+
+        """
+        logger.debug("Terminating Multi VM VNF's NSR")
+
+        nsr_path = "/ns-instance-config"
+        nsr = rwnsr_proxy.get_config(nsr_path)
+
+        ping_pong = nsr.nsr[0]
+        rwnsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(ping_pong.id))
+        time.sleep(30)
+
+
+    def test_delete_records(self, nsd_proxy, vnfd_proxy):
+        """Delete the NSD & VNFD records
+
+        Asserts:
+            The records are deleted.
+        """
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            nsd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd_proxy.delete_config(xpath)
+
+        time.sleep(5)
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
new file mode 100755
index 0000000..ca6e9b5
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
@@ -0,0 +1,282 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_multi_vm_vnf_trafgen.py
+@author Karun Ganesharatnam (karun.ganesharatnam@riftio.com)
+@date 03/16/2016
+@brief Scriptable load-balancer test with multi-vm VNFs
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import shutil
+import subprocess
+import time
+import uuid
+
+from gi.repository import (
+    NsdYang,
+    NsrYang,
+    RwNsrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+import rift.auto.mano
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.fixture(scope='module')
+def multi_vm_vnf_nsd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    """Generate the multi-VM tg/ts NSD package and return the package path."""
+    # Build the generator invocation once, as an argument list.  A previous
+    # revision built two commands: a shell string (which was executed) and a
+    # list that pointed at a non-existent vnfd/xml/ path for this NSD (it was
+    # printed but never run).  The descriptor lives under nsd/xml/.
+    infile = os.path.join(mvv_descr_dir, 'nsd/xml/multivm_tg_ts_config_nsd.xml')
+    pkg_file = os.path.join(package_dir, 'multivm_tg_ts_config_nsd.tar.gz')
+    logger.debug("Generating NSD package: %s", pkg_file)
+    command = [package_gen_script,
+               "--descriptor-type", "nsd",
+               "--format", "xml",
+               "--infile", infile,
+               "--outdir", package_dir]
+    print("Running the command arguments: %s" % command)
+    subprocess.check_call(command)
+    return pkg_file
+
+def create_nsr(nsd_id, input_param_list, cloud_account_name):
+    """
+    Create the NSR record object
+
+    Arguments:
+         nsd_id             -  NSD id
+         input_param_list - list of input-parameter objects
+
+    Return:
+         NSR object
+    """
+    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+
+    nsr.id = str(uuid.uuid4())
+    nsr.name = rift.auto.mano.resource_name(nsr.id)
+    nsr.short_name = "nsr_short_name"
+    nsr.description = "This is a description"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+    nsr.input_parameter.extend(input_param_list)
+    nsr.cloud_account = cloud_account_name
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl --insecure -F "descriptor=@{file}" http://{host}:4567/api/upload '.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
+    def check_status_onboard_status():
+        uri = 'http://%s:4567/api/upload/%s/state' % (host, transaction_id)
+        curl_cmd = 'curl --insecure {uri}'.format(
+                uri=uri
+                )
+        return subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    elapsed = 0
+    start = time.time()
+    while elapsed < timeout:
+        reply = check_status_onboard_status()
+        state = json.loads(reply)
+        if state["status"] == "success":
+            break
+
+        if state["status"] != "pending":
+            raise DescriptorOnboardError(state)
+
+        time.sleep(1)
+        elapsed = time.time() - start
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+    logger.info("Descriptor onboard was successful")
+
+
+@pytest.mark.setup('multivmvnf')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultiVmVnfTrafgenApp(object):
+    pkg_dir = None
+    @classmethod
+    def teardown_class(cls):
+        """ remove the temporary directory contains the descriptor packages
+        """
+        logger.debug("Removing the temporary package directory: %s", cls.pkg_dir)
+        if not cls.pkg_dir is None:
+            shutil.rmtree(cls.pkg_dir)
+
+    def test_onboard_trafgen_vnfd(self, logger, launchpad_host, vnfd_proxy, trafgen_vnfd_package_file):
+        TestMultiVmVnfTrafgenApp.pkg_dir = os.path.dirname(trafgen_vnfd_package_file)
+        logger.info("Onboarding trafgen vnfd package: %s", trafgen_vnfd_package_file)
+        trans_id = upload_descriptor(logger, trafgen_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should only be a single vnfd"
+        vnfd = vnfds[0]
+        assert vnfd.name == "multivm_trafgen_vnfd"
+
+    def test_onboard_trafsink_vnfd(self, logger, launchpad_host, vnfd_proxy, trafsink_vnfd_package_file):
+        TestMultiVmVnfTrafgenApp.pkg_dir = os.path.dirname(trafsink_vnfd_package_file)
+        logger.info("Onboarding trafsink vnfd package: %s", trafsink_vnfd_package_file)
+        trans_id = upload_descriptor(logger, trafsink_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "multivm_trafsink_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_multi_vm_vnf_nsd(self, logger, launchpad_host, nsd_proxy, multi_vm_vnf_nsd_package_file):
+        logger.info("Onboarding tg_ts nsd package: %s", multi_vm_vnf_nsd_package_file)
+        trans_id = upload_descriptor(logger, multi_vm_vnf_nsd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "multivm_tg_ts_config_nsd"
+
+    def test_instantiate_multi_vm_vnf_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy, cloud_account_name):
+
+        def verify_input_parameters(running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            # Fix: compare the running config's own entries (the loop previously read the enclosing test's 'input_param').
+            for run_input_param in running_config.input_parameter:
+                if (run_input_param.xpath == config_param.xpath and
+                    run_input_param.value == config_param.value):
+                    return True
+            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
+                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                           config_param.value,
+                                                                           running_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        input_parameters = []
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+        descr_value = "New NSD Description"
+        in_param_id = str(uuid.uuid4())
+
+        input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd.id, input_parameters, cloud_account_name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.id)
+        rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=360)
+
+
+@pytest.mark.teardown('multivmvnf')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultiVmVnfTrafgenAppTeardown(object):
+    def test_terminate_nsr(self, nsr_proxy, vnfr_proxy, rwnsr_proxy, logger):
+        """
+        Terminate the instance and check if the record is deleted.
+
+        Asserts:
+        1. NSR record is deleted from instance-config.
+
+        """
+        logger.debug("Terminating Multi VM VNF's NSR")
+
+        nsr_path = "/ns-instance-config"
+        nsr = rwnsr_proxy.get_config(nsr_path)
+
+        ping_pong = nsr.nsr[0]
+        rwnsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(ping_pong.id))
+        time.sleep(30)
+
+
+    def test_delete_records(self, nsd_proxy, vnfd_proxy):
+        """Delete the NSD & VNFD records
+
+        Asserts:
+            The records are deleted.
+        """
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            nsd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd_proxy.delete_config(xpath)
+
+        time.sleep(5)
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py
new file mode 100644
index 0000000..197e95c
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py
@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_trafgen_data.py
+@author Karun Ganesharatnam (karun.ganesharatnam@riftio.com)
+@date 03/16/2016
+@brief Scriptable load-balancer test with multi-vm VNFs
+"""
+
+import ipaddress
+import pytest
+import re
+import subprocess
+import time
+
+import rift.auto.session
+
+from gi.repository import (
+    RwTrafgenYang,
+    RwTrafgenDataYang,
+    RwVnfBaseOpdataYang,
+    RwVnfBaseConfigYang,
+    RwTrafgenYang
+)
+
+
@pytest.fixture(scope='session')
def trafgen_vnfr(request, rwvnfr_proxy, session_type):
    '''Return the first VNFR whose short name contains 'trafgen'.'''
    catalog = rwvnfr_proxy.get("/vnfr-catalog/vnfr", list_obj=True)
    matches = [record for record in catalog.vnfr if 'trafgen' in record.short_name]
    assert matches, "Not found the VNFR with name 'trafgen'"
    return matches[0]
+
@pytest.fixture(scope='session')
def trafgen_session(request, trafgen_vnfr, session_type):
    '''Fixture returning a mgmt session connected to the trafgen VNF.

    Connects to the VNF's configured management IP using the requested
    session type, then blocks (up to 900s) until the VNF system is started.
    '''
    # Imported locally: this module imports rift.auto.session but not
    # rift.vcs, so the wait_until_system_started call below would raise
    # NameError without this import.
    import rift.vcs.vcs

    trafgen_host = trafgen_vnfr.vnf_configuration.config_access.mgmt_ip_address
    if session_type == 'netconf':
        tg_session = rift.auto.session.NetconfSession(host=trafgen_host)
    elif session_type == 'restconf':
        tg_session = rift.auto.session.RestconfSession(host=trafgen_host)
    else:
        # Previously an unknown session_type fell through to an
        # UnboundLocalError on tg_session; fail explicitly instead.
        raise ValueError("Unknown session_type: {}".format(session_type))

    tg_session.connect()
    rift.vcs.vcs.wait_until_system_started(tg_session, 900)
    return tg_session
+
@pytest.fixture(scope='session')
def trafgen_ports(request, trafgen_vnfr, session_type):
    '''Names of all connection points on the trafgen VNFR.'''
    names = []
    for connection_point in trafgen_vnfr.connection_point:
        names.append(connection_point.name)
    return names
+
@pytest.fixture(scope='module')
def tgdata_proxy(trafgen_session):
    '''Proxy to RwTrafgenDataYang on the trafgen session.'''
    proxy = trafgen_session.proxy(RwTrafgenDataYang)
    return proxy
+
+
@pytest.fixture(scope='module')
def tgcfg_proxy(trafgen_session):
    '''Proxy to RwTrafgenYang on the trafgen session.'''
    proxy = trafgen_session.proxy(RwTrafgenYang)
    return proxy
+
+
@pytest.fixture(scope='module')
def vnfdata_proxy(trafgen_session):
    '''Proxy to RwVnfBaseOpdataYang on the trafgen session.'''
    proxy = trafgen_session.proxy(RwVnfBaseOpdataYang)
    return proxy
+
+
@pytest.fixture(scope='module')
def vnfcfg_proxy(trafgen_session):
    '''Proxy to RwVnfBaseConfigYang on the trafgen session.'''
    proxy = trafgen_session.proxy(RwVnfBaseConfigYang)
    return proxy
+
+
def confirm_config(tgcfg_proxy, vnf_name):
    '''Poll until trafgen configuration exists for the given VNF.

    Checks up to 24 times at ten second intervals before failing.

    Arguments:
        tgcfg_proxy - proxy used to read trafgen configuration
        vnf_name - vnf name of configuration
    '''
    xpath = "/vnf-config/vnf[name='%s'][instance='0']" % vnf_name
    attempts_remaining = 24
    while attempts_remaining:
        if tgcfg_proxy.get_config(xpath) is not None:
            return
        attempts_remaining -= 1
        time.sleep(10)
    assert False, "Configuration check timeout"
+
+
def start_traffic(tgdata_proxy, tgcfg_proxy, port_name):
    '''Start traffic on the port with the specified name.

    Waits for the trafgen configuration to exist before issuing the rpc.

    Arguments:
        tgdata_proxy - proxy used to issue the start rpc
        tgcfg_proxy - proxy used to verify trafgen configuration
        port_name - name of port on which to start traffic
    '''
    confirm_config(tgcfg_proxy, 'trafgen')
    start_attrs = {
        'vnf_name': 'trafgen',
        'vnf_instance': 0,
        'port_name': port_name,
    }
    rpc_input = RwTrafgenDataYang.RwStartTrafgenTraffic.from_dict(start_attrs)
    rpc_output = RwVnfBaseOpdataYang.YangOutput_RwVnfBaseOpdata_Start_VnfOutput()
    tgdata_proxy.rpc(rpc_input, rpc_name='start', output_obj=rpc_output)
+
+
def stop_traffic(tgdata_proxy, port_name):
    '''Stop traffic on the port with the specified name.

    Arguments:
        tgdata_proxy - proxy used to issue the stop rpc
        port_name - name of port on which to stop traffic
    '''
    stop_attrs = {
        'vnf_name': 'trafgen',
        'vnf_instance': 0,
        'port_name': port_name,
    }
    rpc_input = RwTrafgenDataYang.RwStopTrafgenTraffic.from_dict(stop_attrs)
    rpc_output = RwVnfBaseOpdataYang.YangOutput_RwVnfBaseOpdata_Stop_VnfOutput()
    tgdata_proxy.rpc(rpc_input, rpc_name='stop', output_obj=rpc_output)
+
+
def wait_for_traffic_started(vnfdata_proxy, vnf_name, port_name, timeout=120, interval=2, threshold=60):
    '''Wait for traffic to be started on the specified port

    Traffic is considered started once the port's input packet counter
    grows by more than `threshold` within a sampling interval.

    Arguments:
        vnfdata_proxy - proxy used to sample vnf operational data
        vnf_name - name of the vnf that owns the port
        port_name - name of the port being monitored
        timeout - time allowed for traffic to start
        interval - interval at which the counters should be checked
        threshold - counter deltas at or below this value count as no traffic
    '''
    def counter_grew(before, after):
        '''True when the sampled counter increased by more than threshold.'''
        return (int(after) - int(before)) > threshold

    counter_xpath = "/vnf-opdata/vnf[name='{}'][instance='0']/port-state[portname='{}']/counters/{}".format(
            vnf_name, port_name, 'input-packets')
    vnfdata_proxy.wait_for_interval(counter_xpath, counter_grew,
                                    timeout=timeout, interval=interval)
+
+
def wait_for_traffic_stopped(vnfdata_proxy, vnf_name, port_name, timeout=60, interval=2, threshold=60):
    '''Wait for traffic to be stopped on the specified port

    Traffic is considered stopped once the port's input packet counter
    grows by less than `threshold` within a sampling interval.

    Arguments:
        vnfdata_proxy - proxy used to sample vnf operational data
        vnf_name - name of the vnf that owns the port
        port_name - name of the port being monitored
        timeout - time allowed for traffic to stop
        interval - interval at which the counters should be checked
        threshold - counter deltas under this value are treated as stopped
    '''
    def counter_settled(before, after):
        '''True when the sampled counter grew less than threshold.'''
        return (int(after) - int(before)) < threshold

    counter_xpath = "/vnf-opdata/vnf[name='{}'][instance='0']/port-state[portname='{}']/counters/{}".format(
            vnf_name, port_name, 'input-packets')
    vnfdata_proxy.wait_for_interval(counter_xpath, counter_settled,
                                    timeout=timeout, interval=interval)
+
@pytest.mark.depends('multivmvnf')
@pytest.mark.incremental
class TestMVVSlbDataFlow:
    """Data-flow tests for the multi-VM VNF scriptable load balancer."""

    def test_start_stop_traffic(self, vnfdata_proxy, tgdata_proxy, tgcfg_proxy, trafgen_ports):
        '''Verify that traffic can be started and stopped on every trafgen port.

        Arguments:
            vnfdata_proxy - proxy to retrieve vnf operational data
            tgdata_proxy - proxy to retrieve trafgen operational data
            tgcfg_proxy - proxy to retrieve trafgen configuration
            trafgen_ports - list of port names on which traffic can be started
        '''
        # Allow the system to settle before driving traffic.
        time.sleep(300)
        for port_name in trafgen_ports:
            start_traffic(tgdata_proxy, tgcfg_proxy, port_name)
            wait_for_traffic_started(vnfdata_proxy, 'trafgen', port_name)
            stop_traffic(tgdata_proxy, port_name)
            wait_for_traffic_stopped(vnfdata_proxy, 'trafgen', port_name)

    def test_start_traffic(self, vnfdata_proxy, tgdata_proxy, tgcfg_proxy, trafgen_ports):
        '''Start traffic on all trafgen ports in preparation for subsequent tests.

        Arguments:
            vnfdata_proxy - proxy to retrieve vnf operational data
            tgdata_proxy - proxy to retrieve trafgen operational data
            tgcfg_proxy - proxy to retrieve trafgen configuration
            trafgen_ports - list of port names on which traffic can be started
        '''
        for port_name in trafgen_ports:
            start_traffic(tgdata_proxy, tgcfg_proxy, port_name)
            wait_for_traffic_started(vnfdata_proxy, 'trafgen', port_name)
diff --git a/rwlaunchpad/ra/pytest/ns/conftest.py b/rwlaunchpad/ra/pytest/ns/conftest.py
new file mode 100644
index 0000000..a1fa446
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/conftest.py
@@ -0,0 +1,292 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import functools
+import hashlib
+import pytest
+import os
+import tempfile
+import shutil
+import subprocess
+
+import gi
+import rift.auto.session
+import rift.mano.examples.ping_pong_nsd as ping_pong
+import rift.vcs.vcs
+
class PackageError(Exception):
    """Raised when a required descriptor package file cannot be located."""
    pass
+
@pytest.fixture(scope='session', autouse=True)
def cloud_account_name(request):
    '''fixture which returns the name used to identify the cloud account'''
    account_name = 'cloud-0'
    return account_name
+
@pytest.fixture(scope='session')
def ping_pong_install_dir():
    '''Fixture containing the location of ping_pong installation
    '''
    return os.path.join(os.environ["RIFT_ROOT"], "images")
+
@pytest.fixture(scope='session')
def ping_vnfd_package_file(ping_pong_install_dir):
    '''Fixture containing the location of the ping vnfd package

    Arguments:
        ping_pong_install_dir - location of ping_pong installation

    Raises:
        PackageError - if the package is not present on disk
    '''
    ping_pkg_file = os.path.join(
            ping_pong_install_dir,
            "ping_vnfd_with_image.tar.gz",
            )
    if not os.path.exists(ping_pkg_file):
        # The original called raise_package_error(), which is not defined
        # anywhere in this module and would fail with a NameError; raise
        # the intended exception type with a useful message instead.
        raise PackageError("Package file not found: {}".format(ping_pkg_file))

    return ping_pkg_file
+
+
@pytest.fixture(scope='session')
def pong_vnfd_package_file(ping_pong_install_dir):
    '''Fixture containing the location of the pong vnfd package

    Arguments:
        ping_pong_install_dir - location of ping_pong installation

    Raises:
        PackageError - if the package is not present on disk
    '''
    pong_pkg_file = os.path.join(
            ping_pong_install_dir,
            "pong_vnfd_with_image.tar.gz",
            )
    if not os.path.exists(pong_pkg_file):
        # raise_package_error() was undefined (NameError); raise the
        # module's PackageError with a useful message instead.
        raise PackageError("Package file not found: {}".format(pong_pkg_file))

    return pong_pkg_file
+
+
@pytest.fixture(scope='session')
def ping_pong_nsd_package_file(ping_pong_install_dir):
    '''Fixture containing the location of the ping_pong_nsd package

    Arguments:
        ping_pong_install_dir - location of ping_pong installation

    Raises:
        PackageError - if the package is not present on disk
    '''
    ping_pong_pkg_file = os.path.join(
            ping_pong_install_dir,
            "ping_pong_nsd.tar.gz",
            )
    if not os.path.exists(ping_pong_pkg_file):
        # raise_package_error() was undefined (NameError); raise the
        # module's PackageError with a useful message instead.
        raise PackageError("Package file not found: {}".format(ping_pong_pkg_file))

    return ping_pong_pkg_file
+
@pytest.fixture(scope='session')
def image_dirs():
    ''' Fixture containing a list of directories where images can be found
    '''
    return [
        os.path.join(
            os.environ['RIFT_BUILD'],
            "modules/core/mano/src/core_mano-build/examples/",
            "ping_pong_ns/ping_vnfd_with_image/images"
        ),
        os.path.join(
            os.environ['RIFT_ROOT'],
            "images"
        ),
    ]
+
@pytest.fixture(scope='session')
def image_paths(image_dirs):
    ''' Fixture mapping image file names to their full paths

    Arguments:
        image_dirs - a list of directories where images are located
    '''
    mapping = {}
    for directory in image_dirs:
        if not os.path.exists(directory):
            continue
        for name in os.listdir(directory):
            mapping[name] = os.path.join(directory, name)
    return mapping
+
@pytest.fixture(scope='session')
def path_ping_image(image_paths):
    ''' Fixture containing the location of the ping image

    Arguments:
        image_paths - mapping of images to their paths
    '''
    ping_image_name = "Fedora-x86_64-20-20131211.1-sda-ping.qcow2"
    return image_paths[ping_image_name]
+
@pytest.fixture(scope='session')
def path_pong_image(image_paths):
    ''' Fixture containing the location of the pong image

    Arguments:
        image_paths - mapping of images to their paths
    '''
    pong_image_name = "Fedora-x86_64-20-20131211.1-sda-pong.qcow2"
    return image_paths[pong_image_name]
+
class PingPongFactory:
    """Factory for generating matched ping and pong descriptor sets."""

    def __init__(self, path_ping_image, path_pong_image, rsyslog_host, rsyslog_port):
        self.path_ping_image = path_ping_image
        self.path_pong_image = path_pong_image
        self.rsyslog_host = rsyslog_host
        self.rsyslog_port = rsyslog_port

    @staticmethod
    def _md5sum(path):
        '''Return the hex md5 digest of the file at path, read in 4KiB chunks.'''
        digest = hashlib.md5()
        with open(path, mode='rb') as image_file:
            while True:
                chunk = image_file.read(4096)
                if not chunk:
                    break
                digest.update(chunk)
        return digest.hexdigest()

    def generate_descriptors(self):
        '''Return a new set of ping and pong descriptors
        '''
        ping_md5sum = self._md5sum(self.path_ping_image)
        pong_md5sum = self._md5sum(self.path_pong_image)

        # Only wire up remote logging when both host and port are known.
        ex_userdata = None
        if self.rsyslog_host and self.rsyslog_port:
            ex_userdata = '''
rsyslog:
  - "$ActionForwardDefaultTemplate RSYSLOG_ForwardFormat"
  - "*.* @{host}:{port}"
            '''.format(
                host=self.rsyslog_host,
                port=self.rsyslog_port,
            )

        return ping_pong.generate_ping_pong_descriptors(
                pingcount=1,
                ping_md5sum=ping_md5sum,
                pong_md5sum=pong_md5sum,
                ex_ping_userdata=ex_userdata,
                ex_pong_userdata=ex_userdata,
        )
+
@pytest.fixture(scope='session')
def ping_pong_factory(path_ping_image, path_pong_image, rsyslog_host, rsyslog_port):
    '''Fixture returns a factory capable of generating ping and pong descriptors
    '''
    factory = PingPongFactory(path_ping_image, path_pong_image, rsyslog_host, rsyslog_port)
    return factory
+
@pytest.fixture(scope='session')
def ping_pong_records(ping_pong_factory):
    '''Fixture returns the default set of ping_pong descriptors
    '''
    records = ping_pong_factory.generate_descriptors()
    return records
+
+
@pytest.fixture(scope='session')
def descriptors(request, ping_pong_records):
    '''Fixture returning descriptor package paths for the network service
    selected via the --network-service pytest option.

    Returns:
        pingpong / pingpong_noimg: tuple of (ping vnfd, pong vnfd, nsd) paths
        haproxy: list of prebuilt package paths
    '''
    def pingpong_descriptors(with_images=True):
        """Generated the VNFDs & NSD files for pingpong NS.

        Returns:
            Tuple: file path for ping vnfd, pong vnfd and ping_pong_nsd
        """
        ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records

        # Descriptors and images are staged into a fresh scratch directory.
        tmpdir = tempfile.mkdtemp()
        rift_build = os.environ['RIFT_BUILD']
        MANO_DIR = os.path.join(
                rift_build,
                "modules/core/mano/src/core_mano-build/examples/ping_pong_ns")
        ping_img = os.path.join(MANO_DIR, "ping_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2")
        pong_img = os.path.join(MANO_DIR, "pong_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2")

        """ grab cached copies of these files if not found. They may not exist 
            because our git submodule dependency mgmt
            will not populate these because they live in .build, not .install
        """
        if not os.path.exists(ping_img):
            ping_img = os.path.join(
                        os.environ['RIFT_ROOT'], 
                        'images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2')
            pong_img = os.path.join(
                        os.environ['RIFT_ROOT'], 
                        'images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2')

        for descriptor in [ping_vnfd, pong_vnfd, ping_pong_nsd]:
            descriptor.write_to_file(output_format='xml', outdir=tmpdir)

        ping_img_path = os.path.join(tmpdir, "{}/images/".format(ping_vnfd.name))
        pong_img_path = os.path.join(tmpdir, "{}/images/".format(pong_vnfd.name))

        if with_images:
            os.makedirs(ping_img_path)
            os.makedirs(pong_img_path)
            shutil.copy(ping_img, ping_img_path)
            shutil.copy(pong_img, pong_img_path)

        # Package each descriptor directory into a .tar.gz with the
        # toolchain's packaging script.
        for dir_name in [ping_vnfd.name, pong_vnfd.name, ping_pong_nsd.name]:
            subprocess.call([
                    "sh",
                    "{rift_install}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh".format(rift_install=os.environ['RIFT_INSTALL']),
                    tmpdir,
                    dir_name])

        return (os.path.join(tmpdir, "{}.tar.gz".format(ping_vnfd.name)),
                os.path.join(tmpdir, "{}.tar.gz".format(pong_vnfd.name)),
                os.path.join(tmpdir, "{}.tar.gz".format(ping_pong_nsd.name)))

    def haproxy_descriptors():
        """HAProxy descriptors."""
        # These packages are produced by the ext_vnfs build; nothing is
        # generated here.
        files = [
            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/http_client/http_client_vnfd.tar.gz"),
            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/httpd/httpd_vnfd.tar.gz"),
            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/haproxy/haproxy_vnfd.tar.gz"),
            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/waf/waf_vnfd.tar.gz"),
            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/haproxy_waf_httpd_nsd/haproxy_waf_httpd_nsd.tar.gz")
            ]

        return files

    # Dispatch on the network service chosen on the pytest command line.
    # NOTE(review): an unrecognized value silently returns None — confirm
    # the option is validated elsewhere.
    if request.config.option.network_service == "pingpong":
        return pingpong_descriptors()
    elif request.config.option.network_service == "pingpong_noimg":
        return pingpong_descriptors(with_images=False)
    elif request.config.option.network_service == "haproxy":
        return haproxy_descriptors()
+
+
@pytest.fixture(scope='session')
def descriptor_images(request):
    '''Image files needed by the selected network service (empty for most).'''
    def haproxy_images():
        """HAProxy images."""
        image_names = [
            "images/haproxy-v03.qcow2",
            "images/web-app-firewall-v02.qcow2",
            "images/web-server-v02.qcow2",
        ]
        return [os.path.join(os.getenv('RIFT_ROOT'), name) for name in image_names]

    if request.config.option.network_service == "haproxy":
        return haproxy_images()

    return []
diff --git a/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py b/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py
new file mode 100644
index 0000000..846ef2e
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py
@@ -0,0 +1,170 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import pytest
+
+from gi.repository import NsrYang, RwNsrYang, RwVnfrYang, NsdYang, RwNsdYang
+import rift.auto.session
+
@pytest.fixture(scope='module')
def proxy(request, mgmt_session):
    '''Callable that returns a proxy for a given yang module.'''
    return mgmt_session.proxy
+
+
# Shorthand aliases for the generated yang types used when building
# scaling-group configuration in the tests below.
ScalingGroupInstance = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance
ScalingGroup = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup

# NOTE(review): INSTANCE_ID is not referenced by any visible test in this
# module — confirm whether it is still needed.
INSTANCE_ID = 1
+
+
@pytest.mark.depends('nsr')
@pytest.mark.incremental
class TestScaling:
    """Exercise NS scaling groups: scale out to the descriptor's maximum
    instance count, verify the records, then scale back in."""

    def wait_for_nsr_state(self, proxy, state):
        """Wait till the NSR reaches a desired state.

        Args:
            proxy (Callable): Proxy for launchpad session.
            state (str): Expected state
        """
        # Assumes exactly one NSR exists; only the first is monitored.
        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
        nsr = nsr_opdata.nsr[0]
        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
        proxy(RwNsrYang).wait_for(xpath, state, timeout=240)

    def verify_scaling_group(self, proxy, group_name, expected_records_count, scale_out=True):
        """
        Args:
            proxy (Callable): LP session
            group_name (str): Group name which is being scaled up.
            expected_records_count (int): Expected number of instance records.
            scale_out (bool, optional): To identify scale-out/scale-in mode.
                NOTE(review): currently unused by this method — confirm.

        Asserts:
            1. Additional records are added to the opdata
            2. Status of the scaling group
            3. New vnfr record has been created.
        """
        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
        nsr_id = nsr_opdata.nsr[0].ns_instance_config_ref

        xpath = ('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'
                 '/scaling-group-record[scaling-group-name-ref="{}"]').format(
                        nsr_id, group_name)

        scaling_record = proxy(NsrYang).get(xpath)

        assert len(scaling_record.instance) == expected_records_count

        # Every instance must be running and each of its VNFRs must exist
        # in the VNFR catalog.
        for instance in scaling_record.instance:
            assert instance.op_status == 'running'

            for vnfr in instance.vnfrs:
                vnfr_record = proxy(RwVnfrYang).get(
                        "/vnfr-catalog/vnfr[id='{}']".format(vnfr))
                assert vnfr_record is not None

    def verify_scale_up(self, proxy, group_name, expected):
        """Verifies the scaling up steps for the group
        NSR moves from running -> scaling-out -> running

        Args:
            proxy (callable): LP proxy
            group_name (str): Name of the group to verify.
            expected (int): Expected number of instance records afterwards.
        """
        self.wait_for_nsr_state(proxy, "scaling-out")
        self.wait_for_nsr_state(proxy, "running")
        self.verify_scaling_group(proxy, group_name, expected)

    def verify_scale_in(self, proxy, group_name, expected):
        """Verifies the scaling in steps for the group.
        NSR moves from running -> scaling-in -> running

        Args:
            proxy (callable): LP proxy
            group_name (str): group name.
            expected (int): Expected number of instance records afterwards.
        """
        self.wait_for_nsr_state(proxy, "scaling-in")
        self.wait_for_nsr_state(proxy, "running")
        self.verify_scaling_group(proxy, group_name, expected, scale_out=False)

    def test_wait_for_nsr_configured(self, proxy):
        """Wait till the NSR state moves to configured before starting scaling
        tests.
        """
        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
        nsrs = nsr_opdata.nsr

        # Scaling tests expect exactly one deployed NSR.
        assert len(nsrs) == 1
        current_nsr = nsrs[0]

        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(current_nsr.ns_instance_config_ref)
        proxy(RwNsrYang).wait_for(xpath, "configured", timeout=240)


    def test_min_max_scaling(self, proxy):
        """Scale each group to its descriptor's max-instance-count (plus one
        attempt beyond, which must be rejected), then scale back in."""
        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
        nsrs = nsr_opdata.nsr
        nsd_id = nsrs[0].nsd_ref
        nsr_id = nsrs[0].ns_instance_config_ref

        # group_name = "http_client_group"

        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/scaling-group-record".format(nsr_id)
        scaling_records = proxy(RwNsrYang).get(xpath, list_obj=True)

        for scaling_record in scaling_records.scaling_group_record:
            group_name = scaling_record.scaling_group_name_ref
            xpath = "/nsd-catalog/nsd[id='{}']/scaling-group-descriptor[name='{}']".format(
                    nsd_id, group_name)
            scaling_group_desc = proxy(NsdYang).get(xpath)

            # Add + 1 to go beyond the threshold
            for instance_id in range(1, scaling_group_desc.max_instance_count + 1):
                xpath = '/ns-instance-config/nsr[id="{}"]/scaling-group[scaling-group-name-ref="{}"]'.format(
                            nsr_id, 
                            group_name)

                instance = ScalingGroupInstance.from_dict({"id": instance_id})
                scaling_group = proxy(NsrYang).get(xpath)

                # First scale-out creates the scaling-group config entry.
                if scaling_group is None:
                    scaling_group = ScalingGroup.from_dict({
                        'scaling_group_name_ref': group_name,
                        })

                scaling_group.instance.append(instance)

                try:
                    proxy(NsrYang).merge_config(xpath, scaling_group)
                    # expected count is instance_id + 1: the base instance
                    # plus the instances added so far.
                    self.verify_scale_up(proxy, group_name, instance_id + 1)
                except rift.auto.session.ProxyRequestError:
                    # Scaling past max-instance-count is expected to be
                    # rejected by the launchpad.
                    assert instance_id == scaling_group_desc.max_instance_count

            # Scale back in by deleting each added instance in turn.
            for instance_id in range(1, scaling_group_desc.max_instance_count):
                xpath = ('/ns-instance-config/nsr[id="{}"]/scaling-group'
                         '[scaling-group-name-ref="{}"]/'
                         'instance[id="{}"]').format(
                         nsr_id, group_name, instance_id)
                proxy(NsrYang).delete_config(xpath)
                self.verify_scale_in(proxy, group_name, instance_id)
+
+
+
+
+
+
+
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py
new file mode 100644
index 0000000..45a7832
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py
@@ -0,0 +1,677 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_pingpong.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 11/03/2015
+@brief Launchpad System Test
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import shutil
+import subprocess
+import tempfile
+import time
+import uuid
+
+import rift.auto.mano
+import rift.auto.session
+import rift.mano.examples.ping_pong_nsd as ping_pong
+
+import gi
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+
+from gi.repository import (
+    NsdYang,
+    RwNsrYang,
+    RwVnfrYang,
+    NsrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+logging.basicConfig(level=logging.DEBUG)
+
@pytest.fixture(scope='module')
def vnfd_proxy(request, mgmt_session):
    '''Proxy to RwVnfdYang on the launchpad session.'''
    return mgmt_session.proxy(RwVnfdYang)
+
@pytest.fixture(scope='module')
def rwvnfr_proxy(request, mgmt_session):
    '''Proxy to RwVnfrYang on the launchpad session.'''
    return mgmt_session.proxy(RwVnfrYang)
+
@pytest.fixture(scope='module')
def vld_proxy(request, mgmt_session):
    '''Proxy to VldYang on the launchpad session.'''
    return mgmt_session.proxy(VldYang)
+
@pytest.fixture(scope='module')
def nsd_proxy(request, mgmt_session):
    '''Proxy to NsdYang on the launchpad session.'''
    return mgmt_session.proxy(NsdYang)
+
@pytest.fixture(scope='module')
def rwnsr_proxy(request, mgmt_session):
    '''Proxy to RwNsrYang on the launchpad session.'''
    return mgmt_session.proxy(RwNsrYang)
+
@pytest.fixture(scope='module')
def base_proxy(request, mgmt_session):
    '''Proxy to RwBaseYang on the launchpad session.'''
    return mgmt_session.proxy(RwBaseYang)
+
class DescriptorOnboardError(Exception):
    """Raised when a descriptor upload transaction fails or times out."""
    pass
+
def create_nsr(nsd, input_param_list, cloud_account_name):
    """
    Create the NSR record object

    Arguments:
        nsd                 - NSD to instantiate
        input_param_list    - list of input-parameter objects
        cloud_account_name  - name of cloud account

    Return:
         NSR object
    """
    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()

    nsr.id = str(uuid.uuid4())
    nsr.name = rift.auto.mano.resource_name(nsr.id)
    nsr.short_name = "nsr_short_name"
    nsr.description = "This is a description"
    # Populate the NSD reference from the nsd argument.  The original code
    # passed nsr.as_dict() here, which ignored the nsd parameter entirely
    # and filled the reference with NSR fields.
    nsr.nsd.from_dict(nsd.as_dict())
    nsr.admin_status = "ENABLED"
    nsr.input_parameter.extend(input_param_list)
    nsr.cloud_account = cloud_account_name

    return nsr
+
def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
    '''Upload a descriptor package to the launchpad via its REST API.

    Arguments:
        logger - logger instance
        descriptor_file - path of the package to upload
        host - launchpad address

    Returns:
        The transaction id assigned to the upload.
    '''
    curl_cmd = 'curl --insecure -F "descriptor=@{file}" https://{host}:4567/api/upload'.format(
            file=descriptor_file,
            host=host,
    )

    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
    reply = json.loads(stdout)
    return reply["transaction_id"]
+
def wait_onboard_transaction_finished(logger, transaction_id, timeout=30, host="127.0.0.1"):
    '''Poll the launchpad until the onboard transaction completes.

    Arguments:
        logger - logger instance
        transaction_id - id of the upload transaction to monitor
        timeout - seconds to wait before giving up
        host - launchpad address

    Raises:
        DescriptorOnboardError - if the transaction fails, reports an
            unknown status, or does not succeed within the timeout.
    '''

    def fetch_status():
        uri = 'https://%s:4567/api/upload/%s/state' % (host, transaction_id)
        curl_cmd = 'curl --insecure {uri}'.format(uri=uri)
        return subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)

    logger.info("Waiting for onboard transaction [%s] to complete", transaction_id)

    start = time.time()
    while True:
        state = json.loads(fetch_status())
        status = state["status"]
        if status == "success":
            break
        if status == "failure":
            raise DescriptorOnboardError(state["errors"])
        if status != "pending":
            raise DescriptorOnboardError(state)
        if time.time() - start >= timeout:
            # Still pending at the deadline.
            raise DescriptorOnboardError(state)
        time.sleep(1)

    logger.info("Descriptor onboard was successful")
+
def onboard_descriptor(host, file_name, logger, endpoint, scheme, cert):
    """On-board/update the descriptor.

    Args:
        host (str): Launchpad IP
        file_name (str): Full file path.
        logger: Logger instance
        endpoint (str): endpoint to be used for the upload operation.
            NOTE(review): currently unused — the upload path is fixed.
        scheme: currently unused; kept for interface compatibility.
        cert: currently unused; kept for interface compatibility.
    """
    logger.info("Onboarding package: %s", file_name)
    transaction_id = upload_descriptor(logger, file_name, host=host)
    wait_onboard_transaction_finished(logger, transaction_id, host=host)
+
+
def terminate_nsrs(rwvnfr_proxy, rwnsr_proxy, logger):
    """
    Terminate every NS instance and check that the records are deleted.

    Asserts:
    1. NSR record is deleted from instance-config.
    2. No VNFR records remain in the catalog.
    """
    logger.debug("Terminating Ping Pong NSRs")

    ns_config = rwnsr_proxy.get_config("/ns-instance-config")

    deleted_xpaths = []
    for record in ns_config.nsr:
        xpath = "/ns-instance-config/nsr[id='{}']".format(record.id)
        rwnsr_proxy.delete_config(xpath)
        deleted_xpaths.append(xpath)

    # Give the system time to tear the instances down, then confirm the
    # config records are gone.
    time.sleep(60)
    for xpath in deleted_xpaths:
        assert rwnsr_proxy.get_config(xpath) is None

    # Get the ns-instance-config
    ns_instance_config = rwnsr_proxy.get_config("/ns-instance-config")

    # Termination tests
    vnfrs = rwvnfr_proxy.get("/vnfr-catalog/vnfr", list_obj=True)
    assert vnfrs is None or len(vnfrs.vnfr) == 0

    # nsr = "/ns-instance-opdata/nsr"
    # nsrs = rwnsr_proxy.get(nsr, list_obj=True)
    # assert len(nsrs.nsr) == 0
+
+
+def generate_tar_files(tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd):
+    """Converts the descriptor to files and package them into zip files
+    that can be uploaded to LP instance.
+
+    Args:
+        tmpdir (tempfile.TemporaryDirectory): temp directory object whose
+            .name is used as the staging/output path
+        ping_vnfd (VirtualNetworkFunction): Ping VNFD data
+        pong_vnfd (VirtualNetworkFunction): Pong VNFD data
+        ping_pong_nsd (NetworkService): PingPong NSD data
+
+    Returns:
+        Tuple: file path for ping vnfd, pong vnfd and ping_pong_nsd
+    """
+    rift_build = os.environ['RIFT_BUILD']
+    MANO_DIR = os.path.join(
+            rift_build,
+            "modules/core/mano/src/core_mano-build/examples/ping_pong_ns")
+    ping_img = os.path.join(MANO_DIR, "ping_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2")
+    pong_img = os.path.join(MANO_DIR, "pong_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2")
+
+    """ grab cached copies of these files if not found. They may not exist
+        because our git submodule dependency mgmt
+        will not populate these because they live in .build, not .install
+    """
+    # NOTE(review): only the ping image's existence is checked; the pong
+    # image is assumed to be missing/present in lockstep with it.
+    if not os.path.exists(ping_img):
+        ping_img = os.path.join(
+                    os.environ['RIFT_ROOT'],
+                    'images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2')
+        pong_img = os.path.join(
+                    os.environ['RIFT_ROOT'],
+                    'images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2')
+
+    # Serialize each descriptor as XML into the staging directory.
+    for descriptor in [ping_vnfd, pong_vnfd, ping_pong_nsd]:
+        descriptor.write_to_file(output_format='xml', outdir=tmpdir.name)
+
+    ping_img_path = os.path.join(tmpdir.name, "{}/images/".format(ping_vnfd.name))
+    pong_img_path = os.path.join(tmpdir.name, "{}/images/".format(pong_vnfd.name))
+    os.makedirs(ping_img_path)
+    os.makedirs(pong_img_path)
+
+    shutil.copy(ping_img, ping_img_path)
+    shutil.copy(pong_img, pong_img_path)
+
+    # Package each descriptor directory into a .tar.gz via the toolchain
+    # helper script. NOTE(review): return codes are not checked.
+    for dir_name in [ping_vnfd.name, pong_vnfd.name, ping_pong_nsd.name]:
+        subprocess.call([
+                "sh",
+                "{rift_install}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh".format(rift_install=os.environ['RIFT_INSTALL']),
+                tmpdir.name,
+                dir_name])
+
+    return (os.path.join(tmpdir.name, "{}.tar.gz".format(ping_vnfd.name)),
+            os.path.join(tmpdir.name, "{}.tar.gz".format(pong_vnfd.name)),
+            os.path.join(tmpdir.name, "{}.tar.gz".format(ping_pong_nsd.name)))
+
+
+@pytest.mark.setup('pingpong')
+@pytest.mark.depends('launchpad')
+@pytest.mark.usefixtures('cloud_account')
+@pytest.mark.incremental
+class TestPingPongStart(object):
+    """A brief overview of the steps performed.
+    1. Generate & on-board new descriptors
+    2. Start & stop the ping pong NSR
+    3. Update the existing descriptor files.
+    4. Start the ping pong NSR.
+
+    """
+
+
+    def test_onboard_descriptors(
+            self,
+            logger,
+            vnfd_proxy,
+            nsd_proxy,
+            mgmt_session,
+            scheme,
+            cert,
+            ping_pong_records):
+        """Generates & On-boards the descriptors.
+
+        First onboards image-less ping/pong VNFDs as a smoke test of the
+        upload path, deletes them, then onboards the full packages (with
+        images) plus the NSD, and verifies both catalogs.
+        """
+        temp_dirs = []
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        endpoint = "upload"
+
+        """
+        This upload routine can get called multiples times for upload API,
+        depending on the combinations of 'cloud_account' & 'endpoint'
+        fixtures. Since the records are cached at module level, we might end up
+        uploading the same uuids multiple times, thus causing errors. So a
+        simple work-around will be to skip the records when they are uploaded
+        for the second time.
+        """
+        def onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file):
+            # On-board VNFDs
+            for file_name in [ping_vnfd_file, pong_vnfd_file]:
+                onboard_descriptor(
+                        mgmt_session.host,
+                        file_name,
+                        logger,
+                        endpoint,
+                        scheme,
+                        cert)
+
+            catalog = vnfd_proxy.get_config('/vnfd-catalog')
+            vnfds = catalog.vnfd
+            # NOTE(review): assertion message has a typo ("should two") --
+            # left untouched since it is a runtime string.
+            assert len(vnfds) == 2, "There should two vnfds"
+            assert "ping_vnfd" in [vnfds[0].name, vnfds[1].name]
+            assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+
+        def delete_vnfds():
+            # Remove every VNFD and verify the catalog drains (5s settle).
+            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            for vnfd_record in vnfds.vnfd:
+                xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+                vnfd_proxy.delete_config(xpath)
+
+            time.sleep(5)
+            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            assert vnfds is None or len(vnfds.vnfd) == 0
+
+
+        # Skip if this module already onboarded both VNFDs (see note above).
+        if catalog is not None and len(catalog.vnfd) == 2 and endpoint == "upload":
+            return
+
+        # NOTE(review): dead branch -- 'endpoint' is hard-coded to "upload"
+        # above; if it ever ran it would raise NameError, since
+        # ping_vnfd/pong_vnfd/ping_pong_nsd are only unpacked further below.
+        if endpoint == "update":
+            for vnfd_record in [ping_vnfd, pong_vnfd]:
+                vnfd_record.descriptor.vnfd[0].description += "_update"
+            ping_pong_nsd.descriptor.nsd[0].description += "_update"
+
+        tmpdir2 = tempfile.TemporaryDirectory()
+        temp_dirs.append(tmpdir2)
+        ping_pong.generate_ping_pong_descriptors(pingcount=1,
+                                                  write_to_file=True,
+                                                  out_dir=tmpdir2.name,
+                                                  ping_fmt='json',
+                                                  pong_fmt='xml',
+                                                  )
+
+        # On-board VNFDs without image
+        ping_vnfd_file = os.path.join(tmpdir2.name, 'ping_vnfd/vnfd/ping_vnfd.json')
+        pong_vnfd_file = os.path.join(tmpdir2.name, 'pong_vnfd/vnfd/pong_vnfd.xml')
+        onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file)
+
+        delete_vnfds()
+
+        tmpdir = tempfile.TemporaryDirectory()
+        temp_dirs.append(tmpdir)
+
+        ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records
+        ping_vnfd_file, pong_vnfd_file, pingpong_nsd_file = \
+            generate_tar_files(tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd)
+
+        # On-board VNFDs with image
+        onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file)
+
+        # On-board NSD
+        onboard_descriptor(
+                mgmt_session.host,
+                pingpong_nsd_file,
+                logger,
+                endpoint,
+                scheme,
+                cert)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        assert nsds[0].name == "ping_pong_nsd"
+
+        # Temp directory cleanup
+        # NOTE(review): cleanup is disabled, so the TemporaryDirectory
+        # objects leak until interpreter exit -- presumably intentional for
+        # post-mortem inspection; confirm.
+#         for temp_dir in temp_dirs:
+#             temp_dir.cleanup()
+
+    def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account):
+        """Create an NSR from the onboarded NSD and verify input parameters."""
+
+        def verify_input_parameters(running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            for run_input_param in running_config.input_parameter:
+                if (run_input_param.xpath == config_param.xpath and
+                    run_input_param.value == config_param.value):
+                    return True
+
+            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
+                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                           config_param.value,
+                                                                           running_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        input_parameters = []
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+        descr_value = "automation"
+        in_param_id = str(uuid.uuid4())  # NOTE(review): unused
+
+        input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd, input_parameters, cloud_account.name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+        assert nsr_opdata is not None
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+    def test_wait_for_pingpong_started(self, rwnsr_proxy):
+        """Wait (up to 180s each) for every NSR to reach operational 'running'."""
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(
+                    nsr.ns_instance_config_ref)
+            rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=180)
+
+    def test_wait_for_pingpong_configured(self, rwnsr_proxy):
+        """Wait (up to 450s each) for every NSR to reach config-status 'configured'."""
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(
+                    nsr.ns_instance_config_ref)
+            rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
+
+
+@pytest.mark.feature("update-api")
+@pytest.mark.depends('pingpong')
+@pytest.mark.usefixtures('cloud_account')
+@pytest.mark.incremental
+class TestUpdateNsr(object):
+    """Exercises the descriptor update API: stops the running NSR, marks the
+    cached descriptors as updated, re-onboards them via the "update"
+    endpoint, then re-instantiates the network service.
+    """
+    def test_stop_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger):
+        """Terminate the currently running NSR instance before updating the descriptor files"""
+        terminate_nsrs(rwvnfr_proxy, rwnsr_proxy, logger)
+
+    def test_onboard_descriptors(
+            self,
+            logger,
+            vnfd_proxy,
+            nsd_proxy,
+            mgmt_session,
+            scheme,
+            cert,
+            ping_pong_records):
+        """Generates & On-boards the descriptors.
+
+        Purges both catalogs first, then repeats the image-less /
+        with-image onboard sequence against the "update" endpoint.
+        """
+        temp_dirs = []
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        endpoint = "update"
+        ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records                
+
+        """
+        This upload routine can get called multiples times for upload API,
+        depending on the combinations of 'cloud_account' & 'endpoint'
+        fixtures. Since the records are cached at module level, we might end up
+        uploading the same uuids multiple times, thus causing errors. So a
+        simple work-around will be to skip the records when they are uploaded
+        for the second time.
+        """
+        def onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file):
+            # On-board VNFDs
+            for file_name in [ping_vnfd_file, pong_vnfd_file]:
+                onboard_descriptor(
+                        mgmt_session.host,
+                        file_name,
+                        logger,
+                        endpoint,
+                        scheme,
+                        cert)
+
+            catalog = vnfd_proxy.get_config('/vnfd-catalog')
+            vnfds = catalog.vnfd
+
+            # NOTE(review): assertion message has a typo ("should two") --
+            # left untouched since it is a runtime string.
+            assert len(vnfds) == 2, "There should two vnfds"
+            assert "ping_vnfd" in [vnfds[0].name, vnfds[1].name]
+            assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+        def delete_nsds():
+            # Remove every NSD and verify the catalog drains (5s settle).
+            nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+            for nsd_record in nsds.nsd:
+                xpath = "/nsd-catalog/nsd[id='{}']".format(nsd_record.id)
+                nsd_proxy.delete_config(xpath)
+
+            time.sleep(5)
+            nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+            assert nsds is None or len(nsds.nsd) == 0
+        delete_nsds()
+
+        def delete_vnfds():
+            # Remove every VNFD and verify the catalog drains (5s settle).
+            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            for vnfd_record in vnfds.vnfd:
+                xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+                vnfd_proxy.delete_config(xpath)
+
+            time.sleep(5)
+            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            assert vnfds is None or len(vnfds.vnfd) == 0
+
+        delete_vnfds()
+
+        # NOTE(review): 'catalog' was snapshotted before the deletes above,
+        # and endpoint is hard-coded to "update", so this skip check can
+        # never trigger here.
+        if catalog is not None and len(catalog.vnfd) == 2 and endpoint == "upload":
+            return
+
+        # NOTE(review): redundant re-unpack -- already done near the top.
+        ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records
+
+        # Mark the cached descriptors as updated. NOTE(review): this mutates
+        # the shared ping_pong_records, so the description grows by
+        # "_update" on every invocation.
+        if endpoint == "update":
+            for vnfd_record in [ping_vnfd, pong_vnfd]:
+                vnfd_record.descriptor.vnfd[0].description += "_update"
+            ping_pong_nsd.descriptor.nsd[0].description += "_update"
+
+        tmpdir2 = tempfile.TemporaryDirectory()
+        temp_dirs.append(tmpdir2)
+        ping_pong.generate_ping_pong_descriptors(pingcount=1,
+                                                  write_to_file=True,
+                                                  out_dir=tmpdir2.name,
+                                                  ping_fmt='json',
+                                                  pong_fmt='xml',
+                                                  )
+
+        # On-board VNFDs without image
+        ping_vnfd_file = os.path.join(tmpdir2.name, 'ping_vnfd/vnfd/ping_vnfd.json')
+        pong_vnfd_file = os.path.join(tmpdir2.name, 'pong_vnfd/vnfd/pong_vnfd.xml')
+        onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file)
+        delete_vnfds()
+
+        tmpdir = tempfile.TemporaryDirectory()
+        temp_dirs.append(tmpdir)
+
+        ping_vnfd_file, pong_vnfd_file, pingpong_nsd_file = \
+            generate_tar_files(tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd)
+
+        # On-board VNFDs with image
+        onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file)
+
+
+        # On-board NSD
+        onboard_descriptor(
+                mgmt_session.host,
+                pingpong_nsd_file,
+                logger,
+                endpoint,
+                scheme,
+                cert)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        assert nsds[0].name == "ping_pong_nsd"
+
+        # Temp directory cleanup
+        # NOTE(review): cleanup disabled -- temp dirs persist until
+        # interpreter exit; presumably for post-mortem inspection.
+#         for temp_dir in temp_dirs:
+#             temp_dir.cleanup()
+
+    def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account):
+        """Re-instantiate the NS from the updated NSD and verify input parameters."""
+        def verify_input_parameters(running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            for run_input_param in running_config.input_parameter:
+                if (run_input_param.xpath == config_param.xpath and
+                    run_input_param.value == config_param.value):
+                    return True
+
+            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
+                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                           config_param.value,
+                                                                           running_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        input_parameters = []
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+        descr_value = "automation"
+        in_param_id = str(uuid.uuid4())  # NOTE(review): unused
+
+        input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd, input_parameters, cloud_account.name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+        assert nsr_opdata is not None
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+    def test_wait_for_pingpong_started(self, rwnsr_proxy):
+        """Wait (up to 180s each) for every NSR to reach operational 'running'."""
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(
+                    nsr.ns_instance_config_ref)
+            rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=180)
+
+    def test_wait_for_pingpong_configured(self, rwnsr_proxy):
+        """Wait (up to 450s each) for every NSR to reach config-status 'configured'."""
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(
+                    nsr.ns_instance_config_ref)
+            rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
+
+
+@pytest.mark.teardown('pingpong')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestPingPongTeardown(object):
+    """Teardown: terminate the ping-pong NSRs and purge the NSD/VNFD catalogs."""
+    def test_terminate_nsrs(self, rwvnfr_proxy, rwnsr_proxy, logger):
+        """
+        Terminate the instance and check if the record is deleted.
+
+        Asserts:
+        1. NSR record is deleted from instance-config.
+
+        """
+        logger.debug("Terminating Ping Pong NSR")
+        terminate_nsrs(rwvnfr_proxy, rwnsr_proxy, logger)
+
+    def test_delete_records(self, nsd_proxy, vnfd_proxy):
+        """Delete the NSD & VNFD records
+
+        Asserts:
+            The records are deleted.
+        """
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            nsd_proxy.delete_config(xpath)
+
+        # NOTE(review): no settle sleep before re-reading, unlike the delete
+        # helpers elsewhere in this file -- presumably deletes are
+        # synchronous here; confirm.
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py
new file mode 100644
index 0000000..ff8fa96
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/01/04
+#
+
+import pytest
+import rift.vcs.vcs
+import time
+
+import gi
+gi.require_version('RwNsrYang', '1.0')
+from gi.repository import RwNsrYang
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+def test_launchpad_longevity(mgmt_session, mgmt_domain_name, rwnsr_proxy):
+    """Longevity check: after a settle period the system and every NSR must
+    still be operational.
+
+    Note: 'mgmt_domain_name' is unused here; kept for fixture-signature
+    parity.
+    """
+    time.sleep(60)
+    rift.vcs.vcs.wait_until_system_started(mgmt_session)
+
+    # Every NSR must still report operational-status 'running'.
+    nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+    for nsr in nsr_opdata.nsr:
+        xpath = ("/ns-instance-opdata"
+                 "/nsr[ns-instance-config-ref='%s']"
+                 "/operational-status") % (nsr.ns_instance_config_ref)
+        operational_status = rwnsr_proxy.get(xpath)
+        assert operational_status == 'running'
+
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py
new file mode 100644
index 0000000..920bd70
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py
@@ -0,0 +1,487 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import socket
+import subprocess
+import time
+
+import pytest
+
+import gi
+import re
+gi.require_version('RwNsrYang', '1.0')
+from gi.repository import (
+        NsdYang,
+        RwBaseYang,
+        RwConmanYang,
+        RwNsrYang,
+        RwNsdYang,
+        RwVcsYang,
+        RwVlrYang,
+        RwVnfdYang,
+        RwVnfrYang,
+        VlrYang,
+        VnfrYang,
+        )
+import rift.auto.session
+import rift.mano.examples.ping_pong_nsd as ping_pong
+
+
+@pytest.fixture(scope='module')
+def proxy(request, mgmt_session):
+    '''Module-scoped accessor for the management session's proxy factory.
+
+    Note: `request` is unused; kept for fixture-signature parity.
+    '''
+    return mgmt_session.proxy
+
+@pytest.fixture(scope='session')
+def updated_ping_pong_records(ping_pong_factory):
+    '''Fixture returns a newly created set of ping and pong descriptors
+    for the create_update tests
+
+    Session-scoped: the same records are shared by all test modules.
+    '''
+    return ping_pong_factory.generate_descriptors()
+
+def yield_vnfd_vnfr_pairs(proxy, nsr=None):
+    """
+    Yields tuples of vnfd & vnfr entries.
+
+    Args:
+        proxy (callable): Launchpad proxy
+        nsr (optional): If specified, only the vnfr & vnfd records of the NSR
+                are returned
+
+    Yields:
+        Tuple: VNFD and its corresponding VNFR entry
+    """
+    def get_vnfd(vnfd_id):
+        # Look up a single VNFD by id in the catalog.
+        xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_id)
+        return proxy(RwVnfdYang).get(xpath)
+
+    # 'vnfr' first holds the catalog path, then is reused as the loop var.
+    vnfr = "/vnfr-catalog/vnfr"
+    vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)
+    for vnfr in vnfrs.vnfr:
+
+        if nsr:
+            # Restrict to VNFRs that are constituents of the given NSR.
+            const_vnfr_ids = [const_vnfr.vnfr_id for const_vnfr in nsr.constituent_vnfr_ref]
+            if vnfr.id not in const_vnfr_ids:
+                continue
+
+        vnfd = get_vnfd(vnfr.vnfd_ref)
+        yield vnfd, vnfr
+
+
+def yield_nsd_nsr_pairs(proxy):
+    """Yields tuples of NSD & NSR
+
+    Args:
+        proxy (callable): Launchpad proxy
+
+    Yields:
+        Tuple: NSD and its corresponding NSR record
+    """
+
+    # Resolve each NSR config's NSD reference back into the NSD catalog.
+    for nsr_cfg, nsr in yield_nsrc_nsro_pairs(proxy):
+        nsd_path = "/nsd-catalog/nsd[id='{}']".format(
+                nsr_cfg.nsd.id)
+        nsd = proxy(RwNsdYang).get_config(nsd_path)
+
+        yield nsd, nsr
+
+def yield_nsrc_nsro_pairs(proxy):
+    """Yields tuples of NSR Config & NSR Opdata pairs
+
+    Args:
+        proxy (callable): Launchpad proxy
+
+    Yields:
+        Tuple: NSR config and its corresponding NSR op record
+    """
+    # 'nsr' first holds the opdata path, then is reused as the loop var.
+    nsr = "/ns-instance-opdata/nsr"
+    nsrs = proxy(RwNsrYang).get(nsr, list_obj=True)
+    for nsr in nsrs.nsr:
+        nsr_cfg_path = "/ns-instance-config/nsr[id='{}']".format(
+                nsr.ns_instance_config_ref)
+        nsr_cfg = proxy(RwNsrYang).get_config(nsr_cfg_path)
+
+        yield nsr_cfg, nsr
+
+
+def assert_records(proxy):
+    """Verifies if the NSR & VNFR records are created
+
+    Expects exactly one NSD/NSR pair (ping_pong) and two VNFD/VNFR pairs
+    (ping and pong).
+    """
+    ns_tuple = list(yield_nsd_nsr_pairs(proxy))
+    assert len(ns_tuple) == 1
+
+    vnf_tuple = list(yield_vnfd_vnfr_pairs(proxy))
+    assert len(vnf_tuple) == 2
+
+
+@pytest.mark.depends('nsr')
+@pytest.mark.setup('records')
+@pytest.mark.usefixtures('recover_tasklet')
+@pytest.mark.incremental
+class TestRecordsData(object):
+    def is_valid_ip(self, address):
+        """Verifies that the string is a syntactically valid IPv4 address.
+
+        Note: this only validates the format via socket.inet_aton -- it does
+        NOT check reachability. inet_aton also accepts short forms such as
+        "127.1".
+
+        Args:
+            address (str): IP address
+
+        Returns:
+            boolean
+        """
+        try:
+            socket.inet_aton(address)
+        except socket.error:
+            return False
+        else:
+            return True
+
+
+    @pytest.mark.feature("recovery")
+    def test_tasklets_recovery(self, mgmt_session, proxy, recover_tasklet):
+        """Test the recovery feature of tasklets
+
+        Triggers the vcrash and waits till the system is up
+        """
+        RECOVERY = "RESTART"
+
+        def vcrash(comp):
+            # Fire the vcrash RPC at a single component instance.
+            rpc_ip = RwVcsYang.VCrashInput.from_dict({"instance_name": comp})
+            proxy(RwVcsYang).rpc(rpc_ip)
+
+        # Match instances of the tasklet under test ("<name>-<suffix>").
+        tasklet_name = r'^{}-.*'.format(recover_tasklet)
+
+        # Crash every matching component whose recovery action is RESTART.
+        vcs_info = proxy(RwBaseYang).get("/vcs/info/components")
+        for comp in vcs_info.component_info:
+            if comp.recovery_action == RECOVERY and \
+               re.match(tasklet_name, comp.instance_name):
+                vcrash(comp.instance_name)
+
+        time.sleep(60)
+
+        rift.vcs.vcs.wait_until_system_started(mgmt_session)
+        # NSM tasklet takes a couple of seconds to set up the python structure
+        # so sleep and then continue with the tests.
+        time.sleep(60)
+
+    def test_records_present(self, proxy):
+        """Sanity check: one NSD/NSR pair and two VNFD/VNFR pairs exist."""
+        assert_records(proxy)
+
+    def test_nsd_ref_count(self, proxy):
+        """
+        Asserts
+        1. The ref count data of the NSR with the actual number of NSRs
+        """
+        nsd_ref_xpath = "/ns-instance-opdata/nsd-ref-count"
+        nsd_refs = proxy(RwNsrYang).get(nsd_ref_xpath, list_obj=True)
+
+        # Ref counts as reported by the launchpad opdata.
+        expected_ref_count = collections.defaultdict(int)
+        for nsd_ref in nsd_refs.nsd_ref_count:
+            expected_ref_count[nsd_ref.nsd_id_ref] = nsd_ref.instance_ref_count
+
+        # Ref counts recomputed by walking the NSD/NSR pairs ourselves.
+        actual_ref_count = collections.defaultdict(int)
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            actual_ref_count[nsd.id] += 1
+
+        assert expected_ref_count == actual_ref_count
+
+    def test_vnfd_ref_count(self, proxy):
+        """
+        Asserts
+        1. The ref count data of the VNFR with the actual number of VNFRs
+        """
+        vnfd_ref_xpath = "/vnfr-catalog/vnfd-ref-count"
+        vnfd_refs = proxy(RwVnfrYang).get(vnfd_ref_xpath, list_obj=True)
+
+        # Ref counts as reported by the launchpad.
+        expected_ref_count = collections.defaultdict(int)
+        for vnfd_ref in vnfd_refs.vnfd_ref_count:
+            expected_ref_count[vnfd_ref.vnfd_id_ref] = vnfd_ref.instance_ref_count
+
+        # Ref counts recomputed by walking the VNFD/VNFR pairs ourselves.
+        actual_ref_count = collections.defaultdict(int)
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            actual_ref_count[vnfd.id] += 1
+
+        assert expected_ref_count == actual_ref_count
+
+    def test_nsr_nsd_records(self, proxy):
+        """
+        Verifies the correctness of the NSR record using its NSD counter-part
+
+        Asserts:
+        1. The count of vnfd and vnfr records
+        2. Count of connection point descriptor and records
+        """
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            assert nsd.name == nsr.nsd_name_ref
+            assert len(nsd.constituent_vnfd) == len(nsr.constituent_vnfr_ref)
+
+            # NOTE(review): zip() assumes vld and vlr lists share ordering --
+            # confirm the launchpad guarantees this.
+            assert len(nsd.vld) == len(nsr.vlr)
+            for vnfd_conn_pts, vnfr_conn_pts in zip(nsd.vld, nsr.vlr):
+                assert len(vnfd_conn_pts.vnfd_connection_point_ref) == \
+                       len(vnfr_conn_pts.vnfr_connection_point_ref)
+
+    def test_vdu_record_params(self, proxy):
+        """
+        Asserts:
+        1. If a valid floating IP has been assigned to the VM
+        2. Count of VDUD and the VDUR
+        3. Check if the VM flavor has been copied over the VDUR
+        """
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            assert vnfd.mgmt_interface.port == vnfr.mgmt_interface.port
+            assert len(vnfd.vdu) == len(vnfr.vdur)
+
+            # NOTE(review): zip() assumes vdu/vdur lists share ordering.
+            for vdud, vdur in zip(vnfd.vdu, vnfr.vdur):
+                assert vdud.vm_flavor == vdur.vm_flavor
+                assert self.is_valid_ip(vdur.management_ip) is True
+                assert vdud.external_interface[0].vnfd_connection_point_ref == \
+                    vdur.external_interface[0].vnfd_connection_point_ref
+
+    def test_external_vl(self, proxy):
+        """
+        Asserts:
+        1. Valid IP for external connection point
+        2. A valid external network fabric
+        3. Connection point names are copied over
+        4. Count of VLD and VLR
+        5. Checks for a syntactically valid assigned subnet
+        6. Checks that the VLR operational status is "running"
+        """
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            cp_des, cp_rec = vnfd.connection_point, vnfr.connection_point
+
+            assert len(cp_des) == len(cp_rec)
+            assert cp_des[0].name == cp_rec[0].name
+            assert self.is_valid_ip(cp_rec[0].ip_address) is True
+
+            # Fetch the VLR backing the first connection point.
+            xpath = "/vlr-catalog/vlr[id='{}']".format(cp_rec[0].vlr_ref)
+            vlr = proxy(RwVlrYang).get(xpath)
+
+            assert len(vlr.network_id) > 0
+            assert len(vlr.assigned_subnet) > 0
+            # Subnet is CIDR "a.b.c.d/prefix"; validate the address part.
+            ip, _ = vlr.assigned_subnet.split("/")
+            assert self.is_valid_ip(ip) is True
+            assert vlr.operational_status == "running"
+
+
+    def test_nsr_record(self, proxy):
+        """
+        Currently we only test for the components of NSR tests. Ignoring the
+        operational-events records
+
+        Asserts:
+        1. The constituent components.
+        2. Admin status of the corresponding NSD record.
+        """
+        for nsr_cfg, nsr in yield_nsrc_nsro_pairs(proxy):
+            # 1 n/w and 2 connection points
+            assert len(nsr.vlr) == 1
+            assert len(nsr.vlr[0].vnfr_connection_point_ref) == 2
+
+            assert len(nsr.constituent_vnfr_ref) == 2
+            assert nsr_cfg.admin_status == 'ENABLED'
+
+    def test_wait_for_pingpong_configured(self, proxy):
+        """Wait (up to 400s) for the single NSR to reach config-status 'configured'."""
+        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        assert len(nsrs) == 1
+        current_nsr = nsrs[0]
+
+        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(current_nsr.ns_instance_config_ref)
+        proxy(RwNsrYang).wait_for(xpath, "configured", timeout=400)
+
+    def test_monitoring_params(self, proxy):
+        """
+        Asserts:
+        1. The value counter ticks?
+        2. If the meta fields are copied over
+        """
+        def mon_param_record(vnfr_id, mon_param_id):
+             # xpath of a single monitoring-param record under a VNFR.
+             return '/vnfr-catalog/vnfr[id="{}"]/monitoring-param[id="{}"]'.format(
+                    vnfr_id, mon_param_id)
+
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            for mon_des in (vnfd.monitoring_param):
+                # 'mon_rec' first holds the xpath, then the fetched record.
+                mon_rec = mon_param_record(vnfr.id, mon_des.id)
+                mon_rec = proxy(VnfrYang).get(mon_rec)
+
+                # Meta data check
+                # NOTE(review): assumes every descriptor field is mirrored
+                # verbatim on the record -- confirm for computed fields.
+                fields = mon_des.as_dict().keys()
+                for field in fields:
+                    assert getattr(mon_des, field) == getattr(mon_rec, field)
+                # Tick check
+                #assert mon_rec.value_integer > 0
+
+    def test_cm_nsr(self, proxy):
+        """
+        Asserts:
+            1. The ID of the NSR in cm-state
+            2. Name of the cm-nsr
+            3. The vnfr component's count
+            4. State of the cm-nsr
+        """
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsr.ns_instance_config_ref)
+            con_data = proxy(RwConmanYang).get(con_nsr_xpath)
+
+            assert con_data.name == "ping_pong_nsd"
+            assert len(con_data.cm_vnfr) == 2
+
+            state_path = con_nsr_xpath + "/state"
+            proxy(RwConmanYang).wait_for(state_path, 'ready', timeout=120)
+
    def test_cm_vnfr(self, proxy):
        """
        Asserts:
            1. The ID of Vnfr in cm-state
            2. Name of the vnfr
            3. State of the VNFR
            4. Checks for a reachable IP in mgmt_interface
            5. Basic checks for connection point and cfg_location.
        """
        def is_reachable(ip, timeout=10):
            # Single ICMP echo with a deadline; True iff ping exits 0.
            rc = subprocess.call(["ping", "-c1", "-w", str(timeout), ip])
            if rc == 0:
                return True
            return False

        nsr_cfg, _ = list(yield_nsrc_nsro_pairs(proxy))[0]
        con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsr_cfg.id)

        for _, vnfr in yield_vnfd_vnfr_pairs(proxy):
            con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id='{}']".format(vnfr.id)
            con_data = proxy(RwConmanYang).get(con_vnfr_path)

            assert con_data is not None

            # Wait for config-manager to finish configuring this VNFR.
            state_path = con_vnfr_path + "/state"
            proxy(RwConmanYang).wait_for(state_path, 'ready', timeout=120)

            # Re-fetch after the state transition so the record is current.
            con_data = proxy(RwConmanYang).get(con_vnfr_path)
            assert is_reachable(con_data.mgmt_interface.ip_address) is True

            assert len(con_data.connection_point) == 1
            connection_point = con_data.connection_point[0]
            assert connection_point.name == vnfr.connection_point[0].name
            assert connection_point.ip_address == vnfr.connection_point[0].ip_address

            assert con_data.cfg_location is not None
+
@pytest.mark.depends('nsr')
@pytest.mark.setup('nfvi')
@pytest.mark.incremental
class TestNfviMetrics(object):
    # NOTE: NFVI metric collection from the NSR is deprecated; only the
    # record-presence check actually runs today (the metrics test is skipped).

    def test_records_present(self, proxy):
        """Sanity check that the expected NS/VNF records exist before any
        metric inspection (delegates to the module-level assert_records)."""
        assert_records(proxy)

    @pytest.mark.skipif(True, reason='NFVI metrics collected from NSR are deprecated, test needs to be updated to collected metrics from VNFRs')
    def test_nfvi_metrics(self, proxy):
        """
        Verify the NFVI metrics

        Asserts:
            1. Computed metrics, such as memory, cpu, storage and ports, match
               with the metrics in NSR record. The metrics are computed from the
               descriptor records.
            2. Check if the 'utilization' field has a valid value (> 0) and matches
               with the 'used' field, if available.
        """
        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
            nfvi_metrics = nsr.nfvi_metrics
            computed_metrics = collections.defaultdict(int)

            # Get the constituent VNF records.
            for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy, nsr):
                # Only the first VDU of each VNFD is counted here.
                vdu = vnfd.vdu[0]
                vm_spec = vdu.vm_flavor
                computed_metrics['vm'] += 1
                # Descriptor units are MB/GB; NSR metrics are in bytes.
                computed_metrics['memory'] += vm_spec.memory_mb * (10**6)
                computed_metrics['storage'] += vm_spec.storage_gb * (10**9)
                computed_metrics['vcpu'] += vm_spec.vcpu_count
                computed_metrics['external_ports'] += len(vnfd.connection_point)
                computed_metrics['internal_ports'] += len(vdu.internal_connection_point)

            assert nfvi_metrics.vm.active_vm == computed_metrics['vm']

            # Availability checks
            for metric_name in computed_metrics:
                metric_data = getattr(nfvi_metrics, metric_name)
                total_available = getattr(metric_data, 'total', None)

                if total_available is not None:
                    assert computed_metrics[metric_name] == total_available

            # Utilization checks
            for metric_name in ['memory', 'storage', 'vcpu']:
                metric_data = getattr(nfvi_metrics, metric_name)

                utilization = metric_data.utilization
                # assert utilization > 0

                # If used field is available, check if it matches with utilization!
                total = metric_data.total
                used = getattr(metric_data, 'used', None)
                if used is not None:
                    assert total > 0
                    computed_utilization = round((used/total) * 100, 2)
                    assert abs(computed_utilization - utilization) <= 0.1
+
+
+
@pytest.mark.depends('nfvi')
@pytest.mark.incremental
class TestRecordsDescriptors:
    def test_create_update_vnfd(self, proxy, updated_ping_pong_records):
        """Verify VNFD create/read/replace round-trips.

        Asserts:
            The created VNFD record can be read back with the same id.
        """
        ping_vnfd, pong_vnfd, _ = updated_ping_pong_records
        vnfd_proxy = proxy(RwVnfdYang)

        for record in (ping_vnfd, pong_vnfd):
            vnfd_proxy.create_config("/vnfd-catalog/vnfd", record.vnfd)

            by_id_xpath = "/vnfd-catalog/vnfd[id='{}']".format(record.id)
            fetched = vnfd_proxy.get(by_id_xpath)
            assert fetched.id == record.id

            # Replace with the same descriptor exercises the update path.
            vnfd_proxy.replace_config(by_id_xpath, record.vnfd)

    def test_create_update_nsd(self, proxy, updated_ping_pong_records):
        """Verify NSD create/read/replace round-trip.

        Asserts:
            The created NSD record can be read back with the same id.
        """
        _, _, ping_pong_nsd = updated_ping_pong_records
        nsd_proxy = proxy(NsdYang)

        nsd_proxy.create_config("/nsd-catalog/nsd", ping_pong_nsd.descriptor)

        by_id_xpath = "/nsd-catalog/nsd[id='{}']".format(ping_pong_nsd.id)
        fetched = nsd_proxy.get(by_id_xpath)
        assert fetched.id == ping_pong_nsd.id

        # Replace with the same descriptor exercises the update path.
        nsd_proxy.replace_config(by_id_xpath, ping_pong_nsd.descriptor)
+
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py
new file mode 100644
index 0000000..0878db7
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py
@@ -0,0 +1,192 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_scaling.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 07/13/2016
+@brief Pingpong scaling system test
+"""
+
+import os
+import pytest
+import subprocess
+import sys
+import time
+import uuid
+
+import rift.auto.mano
+import rift.auto.session
+import rift.auto.descriptor
+
+from gi.repository import (
+    NsrYang,
+    NsdYang,
+    VnfrYang,
+    RwNsrYang,
+    RwNsdYang,
+    RwVnfrYang,
+)
+
@pytest.mark.setup('pingpong_nsd')
@pytest.mark.depends('launchpad')
class TestSetupPingpongNsd(object):
    def test_onboard(self, mgmt_session, descriptors):
        """Onboard every generated descriptor package onto the launchpad."""
        for package in descriptors:
            rift.auto.descriptor.onboard(mgmt_session.host, package)

    def test_install_sar(self, mgmt_session):
        """Install sysstat (sar) on the launchpad host for later monitoring."""
        install_cmd = (
            'ssh {mgmt_ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no '
            '-- sudo yum install sysstat --assumeyes'
        ).format(mgmt_ip=mgmt_session.host)
        # shell=True is deliberate: the whole ssh invocation is one string.
        subprocess.check_call(install_cmd, shell=True)
+
+
@pytest.fixture(scope='function', params=[5,10,15,20,25])
def service_count(request):
    '''Fixture representing the number of services to test.

    Parametrized so the scaling test runs once per target count,
    sweeping from 5 up to 25 network services.
    '''
    return request.param
+
@pytest.mark.depends('pingpong_nsd')
class TestScaling(object):
    @pytest.mark.preserve_fixture_order
    def test_scaling(self, mgmt_session, cloud_account_name, service_count):
        """Instantiate network services up to ``service_count`` and record
        launchpad host performance (via sar) at that scale."""

        def start_services(mgmt_session, desired_service_count, max_attempts=3):
            # Reuse the single onboarded NSD for every service instance.
            catalog = mgmt_session.proxy(NsdYang).get_config('/nsd-catalog')
            nsd = catalog.nsd[0]

            nsr_path = "/ns-instance-config"
            nsr = mgmt_session.proxy(RwNsrYang).get_config(nsr_path)
            # NOTE: shadows the fixture parameter; here it tracks how many
            # services are currently instantiated.
            service_count = len(nsr.nsr)

            attempts = 0
            while attempts < max_attempts and service_count < desired_service_count:
                attempts += 1

                # Create one NSR per missing service, each with a unique name.
                for count in range(service_count, desired_service_count):
                    nsr = rift.auto.descriptor.create_nsr(
                        cloud_account_name,
                        "pingpong_%s" % str(uuid.uuid4().hex[:10]),
                        nsd.id)
                    mgmt_session.proxy(RwNsrYang).create_config('/ns-instance-config/nsr', nsr)

                # Count services that reach running/configured; delete the ones
                # that fail so the next attempt can replace them.
                ns_instance_opdata = mgmt_session.proxy(RwNsrYang).get('/ns-instance-opdata')
                for nsr in ns_instance_opdata.nsr:
                    try:
                        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
                        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=180)
                        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(nsr.ns_instance_config_ref)
                        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
                        service_count += 1
                    except rift.auto.session.ProxyWaitForError:
                        mgmt_session.proxy(RwNsrYang).delete_config("/ns-instance-config/nsr[id='{}']".format(nsr.ns_instance_config_ref))

        def monitor_launchpad_performance(service_count, interval=30, samples=1):
            # Collect system activity data from the launchpad host and append
            # it to a per-task log that test_generate_plots consumes later.
            sar_cmd = "ssh {mgmt_ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sar -A {interval} {samples}".format(
                    mgmt_ip=mgmt_session.host,
                    interval=interval,
                    samples=samples
            )
            output = subprocess.check_output(sar_cmd, shell=True, stderr=subprocess.STDOUT)
            outfile = '{rift_artifacts}/scaling_{task_id}.log'.format(
                    rift_artifacts=os.environ.get('RIFT_ARTIFACTS'),
                    task_id=os.environ.get('AUTO_TASK_ID')
            )
            with open(outfile, 'a') as fh:
                message = '''
== SCALING RESULTS : {service_count} Network Services ==
{output}               
                '''.format(service_count=service_count, output=output.decode())
                fh.write(message)

        start_services(mgmt_session, service_count)
        monitor_launchpad_performance(service_count, interval=30, samples=1)
+
@pytest.mark.depends('pingpong_nsd')
@pytest.mark.teardown('pingpong_nsd')
class TestTeardownPingpongNsr(object):
    def test_teardown_nsr(self, mgmt_session):
        """Delete every configured NSR, then verify all VNFRs are gone."""

        ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config('/ns-instance-config')
        for nsr in ns_instance_config.nsr:
            mgmt_session.proxy(RwNsrYang).delete_config("/ns-instance-config/nsr[id='{}']".format(nsr.id))

        # Allow time for all VNFRs to be torn down before checking the catalog.
        time.sleep(60)
        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get('/vnfr-catalog')
        assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0

    def test_generate_plots(self):
        """Render CPU/memory/interface plots from the sar log that the
        scaling test appended to, one PNG per metric family."""
        plot_commands = [
            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
                    '--plot "{rift_artifacts}/scaling_cpu_{task_id}.png" '
                    '--title "CPU Utilization by network service count" '
                    '--keys CPU '
                    '--fields %usr,%idle,%sys '
                    '--key-filter CPU:all '
                    '--ylabel "CPU Utilization %" '
                    '--xlabel "Network Service Count" '
                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
            ),
            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
                    '--plot "{rift_artifacts}/scaling_mem_{task_id}.png" '
                    '--title "Memory Utilization by network service count" '
                    '--fields kbmemfree,kbmemused,kbbuffers,kbcached,kbcommit,kbactive,kbinact,kbdirty '
                    '--ylabel "Memory Utilization" '
                    '--xlabel "Network Service Count" '
                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
            ),
            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
                    '--plot "{rift_artifacts}/scaling_mempct_{task_id}.png" '
                    '--title "Memory Utilization by network service count" '
                    '--fields %memused,%commit '
                    '--ylabel "Memory Utilization %" '
                    '--xlabel "Network Service Count" '
                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
            ),
            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
                    '--plot "{rift_artifacts}/scaling_iface_{task_id}.png" '
                    '--title "Interface Utilization by network service count" '
                    '--keys IFACE '
                    '--fields rxpck/s,txpck/s,rxkB/s,txkB/s,rxcmp/s,txcmp/s,rxmcst/s '
                    '--key-filter IFACE:eth0 '
                    '--ylabel "Interface Utilization" '
                    '--xlabel "Network Service Count" '
                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
            ),
            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
                    '--plot "{rift_artifacts}/scaling_iface_err_{task_id}.png" '
                    '--title "Interface Errors by network service count" '
                    '--keys IFACE '
                    '--fields rxerr/s,txerr/s,coll/s,rxdrop/s,txdrop/s,txcarr/s,rxfram/s,rxfifo/s,txfifo/s '
                    '--key-filter IFACE:eth0 '
                    '--ylabel "Interface Errors" '
                    '--xlabel "Network Service Count" '
                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
            ),
        ]

        for cmd in plot_commands:
            # shell=True is required for the '<' input redirection in each cmd.
            subprocess.check_call(
                    cmd.format(
                        rift_install=os.environ.get('RIFT_INSTALL'),
                        rift_artifacts=os.environ.get('RIFT_ARTIFACTS'),
                        task_id=os.environ.get('AUTO_TASK_ID')
                    ),
                    shell=True
            )
+
diff --git a/rwlaunchpad/ra/pytest/ns/test_onboard.py b/rwlaunchpad/ra/pytest/ns/test_onboard.py
new file mode 100644
index 0000000..5951ce8
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/test_onboard.py
@@ -0,0 +1,408 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_onboard.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@brief Onboard descriptors
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import shutil
+import subprocess
+import time
+import uuid
+
+import rift.auto.mano
+import rift.auto.session
+
+import gi
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+
+from gi.repository import (
+    RwcalYang,
+    NsdYang,
+    RwNsrYang,
+    RwVnfrYang,
+    NsrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+logging.basicConfig(level=logging.DEBUG)
+
+
@pytest.fixture(scope='module')
def vnfd_proxy(request, mgmt_session):
    '''Module-scoped proxy for the RwVnfdYang model (VNFD catalog).'''
    return mgmt_session.proxy(RwVnfdYang)
+
@pytest.fixture(scope='module')
def rwvnfr_proxy(request, mgmt_session):
    '''Module-scoped proxy for the RwVnfrYang model (VNFR catalog).'''
    return mgmt_session.proxy(RwVnfrYang)
+
@pytest.fixture(scope='module')
def vld_proxy(request, mgmt_session):
    '''Module-scoped proxy for the VldYang model (virtual link descriptors).'''
    return mgmt_session.proxy(VldYang)
+
+
@pytest.fixture(scope='module')
def nsd_proxy(request, mgmt_session):
    '''Module-scoped proxy for the NsdYang model (NSD catalog).'''
    return mgmt_session.proxy(NsdYang)
+
+
@pytest.fixture(scope='module')
def rwnsr_proxy(request, mgmt_session):
    '''Module-scoped proxy for the RwNsrYang model (NS instance config/opdata).'''
    return mgmt_session.proxy(RwNsrYang)
+
@pytest.fixture(scope='module')
def base_proxy(request, mgmt_session):
    '''Module-scoped proxy for the RwBaseYang model.'''
    return mgmt_session.proxy(RwBaseYang)
+
+
@pytest.fixture(scope="module")
def endpoint():
    '''REST endpoint used for descriptor package operations ("upload").'''
    return "upload"
+
def create_nsr(nsd, input_param_list, cloud_account_name):
    """
    Create the NSR record object

    Arguments:
         nsd                -  NSD
         input_param_list   - list of input-parameter objects
         cloud_account_name - name of the cloud account the NSR is deployed on

    Return:
         NSR object
    """
    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()

    nsr.id = str(uuid.uuid4())
    # Human-readable name derived from the generated id.
    nsr.name = rift.auto.mano.resource_name(nsr.id)
    nsr.short_name = "nsr_short_name"
    nsr.description = "This is a description"
    # Embed a full copy of the NSD into the NSR configuration.
    nsr.nsd.from_dict(nsd.as_dict())
    nsr.admin_status = "ENABLED"
    nsr.input_parameter.extend(input_param_list)
    nsr.cloud_account = cloud_account_name

    return nsr
+
+
def upload_descriptor(
        logger,
        descriptor_file,
        scheme,
        cert,
        host="127.0.0.1",
        endpoint="upload"):
    """Upload a descriptor package to the launchpad REST API via curl.

    Args:
        logger: Logger instance
        descriptor_file (str): Path to the descriptor package
        scheme (str): 'http' or 'https'
        cert: (cert_file, key_file) tuple for TLS client authentication
        host (str): Launchpad IP
        endpoint (str): upload endpoint name

    Returns:
        Transaction id (from the JSON reply) used to poll onboard state.
    """
    curl_cmd = ('curl --cert {cert} --key {key} -F "descriptor=@{file}" -k '
                '{scheme}://{host}:4567/api/{endpoint}'.format(
            cert=cert[0],
            key=cert[1],
            scheme=scheme,
            endpoint=endpoint,
            file=descriptor_file,
            host=host,
            ))

    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
    # shlex.split -> no shell; curl's stdout is the JSON reply body.
    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)

    json_out = json.loads(stdout)
    transaction_id = json_out["transaction_id"]

    return transaction_id
+
+
class DescriptorOnboardError(Exception):
    """Raised when a descriptor onboard transaction fails or times out."""
    pass
+
+
def wait_onboard_transaction_finished(
        logger,
        transaction_id,
        scheme,
        cert,
        timeout=600,
        host="127.0.0.1",
        endpoint="upload"):
    """Poll the upload endpoint until the onboard transaction completes.

    Args:
        logger: Logger instance
        transaction_id: id returned by upload_descriptor
        scheme (str): 'http' or 'https'
        cert: (cert_file, key_file) tuple for TLS client authentication
        timeout (int): maximum seconds to wait for success
        host (str): Launchpad IP
        endpoint (str): upload endpoint name

    Raises:
        DescriptorOnboardError: if the transaction reports a status other
            than 'pending'/'success', or if the timeout expires first.
    """
    logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
    uri = '%s://%s:4567/api/%s/%s/state' % (scheme, host, endpoint, transaction_id)

    # Last state observed; stays None if we never polled (e.g. timeout <= 0).
    # The original referenced 'state' after the loop without initializing it,
    # which raised NameError instead of DescriptorOnboardError in that case.
    state = None
    start = time.time()
    while time.time() - start < timeout:
        reply = requests.get(uri, cert=cert, verify=False)
        state = reply.json()
        if state["status"] == "success":
            break
        if state["status"] != "pending":
            # Terminal failure reported by the server.
            raise DescriptorOnboardError(state)

        time.sleep(1)

    if state is None or state["status"] != "success":
        # Timed out while still pending (or never polled at all).
        raise DescriptorOnboardError(state)
    logger.info("Descriptor onboard was successful")
+
+
def onboard_descriptor(host, file_name, logger, endpoint, scheme, cert):
    """On-board/update a descriptor package and wait for completion.

    Args:
        host (str): Launchpad IP
        file_name (str): Full file path.
        logger: Logger instance
        endpoint (str): endpoint to be used for the upload operation.
        scheme (str): 'http' or 'https'
        cert: (cert_file, key_file) tuple for TLS client authentication
    """
    logger.info("Onboarding package: %s", file_name)
    # Kick off the upload, then block until the transaction resolves.
    transaction = upload_descriptor(
            logger, file_name, scheme, cert, host=host, endpoint=endpoint)
    wait_onboard_transaction_finished(
            logger, transaction, scheme, cert, host=host, endpoint=endpoint)
+
def terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger, wait_after_kill=True):
    """
    Terminate the instance and check if the record is deleted.

    Arguments:
        rwvnfr_proxy - proxy to the RwVnfrYang model
        rwnsr_proxy  - proxy to the RwNsrYang model
        logger       - logger instance
        wait_after_kill - wait 30s after deletion (real clouds) when True,
                only 5s otherwise (mock cloud)

    Asserts:
    1. NSR record is deleted from instance-config.

    """
    logger.debug("Terminating NSRs")

    nsr_path = "/ns-instance-config"
    nsr = rwnsr_proxy.get_config(nsr_path)
    nsrs = nsr.nsr

    xpaths = []
    for nsr in nsrs:
        xpath = "/ns-instance-config/nsr[id='{}']".format(nsr.id)
        rwnsr_proxy.delete_config(xpath)
        xpaths.append(xpath)

    # Give the system time to propagate the deletes before verifying.
    if wait_after_kill:
        time.sleep(30)
    else:
        time.sleep(5)

    for xpath in xpaths:
        nsr = rwnsr_proxy.get_config(xpath)
        assert nsr is None

    # Get the ns-instance-config
    # NOTE(review): result is unused -- presumably a liveness probe of the
    # config store; confirm before removing.
    ns_instance_config = rwnsr_proxy.get_config("/ns-instance-config")

    # Termination tests
    vnfr = "/vnfr-catalog/vnfr"
    vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
    assert vnfrs is None or len(vnfrs.vnfr) == 0

    # nsr = "/ns-instance-opdata/nsr"
    # nsrs = rwnsr_proxy.get(nsr, list_obj=True)
    # assert len(nsrs.nsr) == 0
+
+
+
@pytest.mark.setup('nsr')
@pytest.mark.depends('launchpad')
@pytest.mark.incremental
class TestNsrStart(object):
    """A brief overview of the steps performed.
    1. Generate & on-board new descriptors
    2. Start the NSR
    """

    def test_upload_descriptors(
            self,
            logger,
            vnfd_proxy,
            nsd_proxy,
            mgmt_session,
            scheme,
            cert,
            descriptors
        ):
        """Generates & On-boards the descriptors.

        Asserts that the catalogs afterwards contain exactly the uploaded
        VNFDs and a single NSD.
        """
        endpoint = "upload"

        for file_name in descriptors:
            onboard_descriptor(
                    mgmt_session.host,
                    file_name,
                    logger,
                    endpoint,
                    scheme,
                    cert)

        # Convention: the NSD package is the last entry in descriptors.
        descriptor_vnfds, descriptor_nsd = descriptors[:-1], descriptors[-1]

        catalog = vnfd_proxy.get_config('/vnfd-catalog')
        actual_vnfds = catalog.vnfd
        assert len(actual_vnfds) == len(descriptor_vnfds), \
                "There should {} vnfds".format(len(descriptor_vnfds))

        catalog = nsd_proxy.get_config('/nsd-catalog')
        actual_nsds = catalog.nsd
        assert len(actual_nsds) == 1, "There should only be a single nsd"

    @pytest.mark.feature("upload-image")
    def test_upload_images(self, descriptor_images, cloud_host, cloud_user, cloud_tenants):
        """Upload the VM images referenced by the descriptors to OpenStack."""

        openstack = rift.auto.mano.OpenstackManoSetup(
                cloud_host,
                cloud_user,
                [(tenant, "private") for tenant in cloud_tenants])

        for image_location in descriptor_images:
            image = RwcalYang.ImageInfoItem.from_dict({
                    'name': os.path.basename(image_location),
                    'location': image_location,
                    'disk_format': 'qcow2',
                    'container_format': 'bare'})
            openstack.create_image(image)


    def test_set_scaling_params(self, nsd_proxy):
        """Cap every scaling group in the onboarded NSD at two instances."""
        nsds = nsd_proxy.get('/nsd-catalog')
        nsd = nsds.nsd[0]
        for scaling_group in nsd.scaling_group_descriptor:
            scaling_group.max_instance_count = 2

        nsd_proxy.replace_config('/nsd-catalog/nsd[id="{}"]'.format(
            nsd.id), nsd)


    def test_instantiate_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account_name):
        """Create an NSR from the onboarded NSD and verify its input
        parameters appear in the running configuration."""

        def verify_input_parameters(running_config, config_param):
            """
            Verify the configured parameter set against the running configuration
            """
            for run_input_param in running_config.input_parameter:
                if (run_input_param.xpath == config_param.xpath and
                    run_input_param.value == config_param.value):
                    return True

            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
                                                                           config_param.value,
                                                                           running_config.input_parameter))

        catalog = nsd_proxy.get_config('/nsd-catalog')
        nsd = catalog.nsd[0]

        # Override the NSD description via an input parameter.
        input_parameters = []
        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
        descr_value = "New NSD Description"
        # NOTE(review): in_param_id is unused -- confirm whether the input
        # parameter was meant to carry it.
        in_param_id = str(uuid.uuid4())

        input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
                                                                xpath=descr_xpath,
                                                                value=descr_value)

        input_parameters.append(input_param_1)

        nsr = create_nsr(nsd, input_parameters, cloud_account_name)

        logger.info("Instantiating the Network Service")
        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)

        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
        assert nsr_opdata is not None

        # Verify the input parameter configuration
        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
        for input_param in input_parameters:
            verify_input_parameters(running_config, input_param)

    def test_wait_for_nsr_started(self, rwnsr_proxy):
        """Block until every NSR reports operational-status 'running'."""
        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
        nsrs = nsr_opdata.nsr

        for nsr in nsrs:
            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
            rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=240)
+
+
@pytest.mark.teardown('nsr')
@pytest.mark.depends('launchpad')
@pytest.mark.incremental
class TestNsrTeardown(object):
    def test_terminate_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger, cloud_type):
        """
        Terminate the instance and check if the record is deleted.

        Asserts:
        1. NSR record is deleted from instance-config.

        """
        logger.debug("Terminating NSR")

        # Mock clouds tear down instantly; skip the long settle wait.
        wait_after_kill = True
        if cloud_type == "mock":
            wait_after_kill = False

        terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger, wait_after_kill=wait_after_kill)

    def test_delete_records(self, nsd_proxy, vnfd_proxy):
        """Delete the NSD & VNFD records

        Asserts:
            The records are deleted.
        """
        # get(..., list_obj=True) returns None when the catalog is already
        # empty (as the final asserts acknowledge) -- guard before iterating
        # instead of crashing with AttributeError on an empty catalog.
        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
        for nsd in (nsds.nsd if nsds is not None else []):
            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
            nsd_proxy.delete_config(xpath)

        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
        assert nsds is None or len(nsds.nsd) == 0

        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
        for vnfd_record in (vnfds.vnfd if vnfds is not None else []):
            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
            vnfd_proxy.delete_config(xpath)

        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/test_failover.py b/rwlaunchpad/ra/pytest/test_failover.py
new file mode 100755
index 0000000..40dd7d0
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/test_failover.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python3
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_failover.py
+@brief System test of stopping launchpad on master and
+validating configuration on standby
+"""
+import os
+import sys
+import time
+import argparse
+import subprocess
+
+import gi
+from gi.repository import RwVnfdYang
+from gi.repository import RwVnfrYang
+
+import rift.auto.proxy
+from rift.auto.session import NetconfSession
+
def yield_vnfd_vnfr_pairs(proxy, nsr=None):
    """
    Yields tuples of vnfd & vnfr entries.

    Args:
        proxy (callable): Launchpad proxy
        nsr (optional): If specified, only the vnfr & vnfd records of the NSR
                are returned

    Yields:
        Tuple: VNFD and its corresponding VNFR entry
    """
    def get_vnfd(vnfd_id):
        # Look up the descriptor that a VNFR references by id.
        xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_id)
        return proxy(RwVnfdYang).get(xpath)

    # Removed leftover debug prints ("START"/"STOP") that polluted stdout,
    # and stopped reusing the name 'vnfr' for both the xpath and loop items.
    vnfrs = proxy(RwVnfrYang).get("/vnfr-catalog/vnfr", list_obj=True)
    for vnfr in vnfrs.vnfr:

        if nsr:
            # Restrict to the VNFRs that are constituents of the given NSR.
            const_vnfr_ids = [const_vnfr.vnfr_id for const_vnfr in nsr.constituent_vnfr_ref]
            if vnfr.id not in const_vnfr_ids:
                continue

        vnfd = get_vnfd(vnfr.vnfd_ref)
        yield vnfd, vnfr
+
def check_configuration_on_standby(standby_ip):
    """Connect to the standby (now master) and verify both VNF records exist."""
    print ("Start- check_configuration_on_standby")
    session = NetconfSession(standby_ip)
    session.connect()
    print ("Connected to proxy")

    # Expect exactly the ping and pong VNF record pairs on the new master.
    pairs = list(yield_vnfd_vnfr_pairs(session.proxy))
    assert len(pairs) == 2
+
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Test launchpad failover') 
    parser.add_argument("--master-ip", action="store", dest="master_ip")
    parser.add_argument("--standby-ip", action="store", dest="standby_ip")

    args = parser.parse_args()

    # NOTE(review): --master-ip is parsed but never used here -- presumably
    # the caller stops the master before invoking this script; confirm.
    # 60 seconds should be more than enough time for Agent to be able
    # to make confd as the new Master
    time.sleep(60)
    print ("Try fetching configuration from the old standby or the new Master\n")
    check_configuration_on_standby(args.standby_ip)
diff --git a/rwlaunchpad/ra/pytest/test_launchpad.py b/rwlaunchpad/ra/pytest/test_launchpad.py
new file mode 100644
index 0000000..81f5b54
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/test_launchpad.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_launchpad.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 07/07/2016
+@brief System test of basic launchpad functionality
+"""
+
+import pytest
+
+import gi
+gi.require_version('RwsdnYang', '1.0')
+
+from gi.repository import RwsdnYang
+
@pytest.mark.setup('sdn')
@pytest.mark.feature('sdn')
@pytest.mark.incremental
class TestSdnSetup:
    def test_create_odl_sdn_account(self, mgmt_session, sdn_account_name, sdn_account_type):
        '''Configure sdn account and verify it round-trips.

        Asserts:
            SDN name and account type.
        '''
        proxy = mgmt_session.proxy(RwsdnYang)
        sdn_account = RwsdnYang.SDNAccount(
                name=sdn_account_name,
                account_type=sdn_account_type)
        xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
        proxy.create_config(xpath, sdn_account)
        # Read the record back and actually assert on it -- the original
        # docstring promised these assertions but the fetched value was
        # never checked.
        sdn_account = proxy.get(xpath)
        assert sdn_account.name == sdn_account_name
        assert sdn_account.account_type == sdn_account_type
+
@pytest.mark.depends('sdn')
@pytest.mark.feature('sdn')
@pytest.mark.incremental
class TestSdn:
    def test_show_odl_sdn_account(self, mgmt_session, sdn_account_name, sdn_account_type):
        '''Read back the sdn account configuration.

        Asserts:
            The stored account_type is the one that was configured.
        '''
        xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
        account = mgmt_session.proxy(RwsdnYang).get_config(xpath)
        assert account.account_type == sdn_account_type
+
@pytest.mark.teardown('sdn')
@pytest.mark.feature('sdn')
@pytest.mark.incremental
class TestSdnTeardown:
    def test_delete_odl_sdn_account(self, mgmt_session, sdn_account_name):
        '''Remove the sdn account created during setup.'''
        xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
        mgmt_session.proxy(RwsdnYang).delete_config(xpath)
+
+
@pytest.mark.setup('launchpad')
@pytest.mark.usefixtures('cloud_account')
@pytest.mark.incremental
class TestLaunchpadSetup:
    def test_create_cloud_accounts(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
        '''Configure each cloud account and check it round-trips.

        Asserts:
            The stored name and account_type match the configured values.
        '''
        proxy = mgmt_session.proxy(cloud_module)
        for account in cloud_accounts:
            account_xpath = '{}[name="{}"]'.format(cloud_xpath, account.name)
            proxy.replace_config(account_xpath, account)
            stored = proxy.get(account_xpath)
            assert stored.name == account.name
            assert stored.account_type == account.account_type
+
@pytest.mark.depends('launchpad')
@pytest.mark.usefixtures('cloud_account')
@pytest.mark.incremental
class TestLaunchpad:
    def test_account_connection_status(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
        '''Check that every cloud account connects.

        Asserts:
            connection-status/status reaches 'success' within 30 seconds
            without ever reporting 'failure'.
        '''
        proxy = mgmt_session.proxy(cloud_module)
        for account in cloud_accounts:
            status_xpath = '{}[name="{}"]/connection-status/status'.format(
                cloud_xpath, account.name)
            proxy.wait_for(status_xpath, 'success', timeout=30,
                           fail_on=['failure'])
+
+
@pytest.mark.teardown('launchpad')
@pytest.mark.usefixtures('cloud_account')
@pytest.mark.incremental
class TestLaunchpadTeardown:
    def test_delete_cloud_accounts(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
        '''Remove every cloud account configured during setup.'''
        proxy = mgmt_session.proxy(cloud_module)
        for account in cloud_accounts:
            account_xpath = "{}[name='{}']".format(cloud_xpath, account.name)
            proxy.delete_config(account_xpath)
diff --git a/rwlaunchpad/ra/pytest/test_launchpad_longevity.py b/rwlaunchpad/ra/pytest/test_launchpad_longevity.py
new file mode 100644
index 0000000..c8a4662
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/test_launchpad_longevity.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/01/04
+#
+
+import rift.vcs.vcs
+import time
+import gi
+
def test_launchpad_longevity(mgmt_session, mgmt_domain_name):
    '''Longevity check: after a settling period the system is still started.

    NOTE(review): mgmt_domain_name is an unused fixture parameter; kept so
    pytest still triggers that fixture's setup — confirm before removing.
    '''
    settle_seconds = 60
    time.sleep(settle_seconds)
    rift.vcs.vcs.wait_until_system_started(mgmt_session)
+
diff --git a/rwlaunchpad/ra/pytest/test_start_standby.py b/rwlaunchpad/ra/pytest/test_start_standby.py
new file mode 100755
index 0000000..cf0e5d9
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/test_start_standby.py
@@ -0,0 +1,78 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file  test_start_standby.py
+@brief This test starts the launchpad on a remote VM
+"""
+import argparse
+import sys
+import time
+import os
+import glob
+import subprocess
+import shlex
+import multiprocessing
+
+import rift.auto.session
+import rift.vcs.vcs
+
def get_manifest_file():
    """Return the most recently modified manifest xml in $RIFT_ARTIFACTS.

    Returns:
        str: Path of the newest manifest*xml file.

    Raises:
        KeyError: If RIFT_ARTIFACTS is not set.
        FileNotFoundError: If no manifest*xml file exists there.
    """
    artifacts_path = os.environ["RIFT_ARTIFACTS"]
    manifest_files = glob.glob(artifacts_path + "/manifest*xml")
    if not manifest_files:
        # Fail with a clear message instead of IndexError on [0].
        raise FileNotFoundError(
            "No manifest*xml found in {}".format(artifacts_path))
    # Pick the NEWEST manifest. The original sorted ascending by mtime and
    # returned element 0 -- the oldest file -- which cannot be the manifest
    # of the currently running system.
    return max(manifest_files, key=lambda path: os.stat(path).st_mtime)
+
def copy_manifest_to_remote(remote_ip, manifest_file):
    """Copy the manifest file to /tmp/manifest.xml on the remote host.

    Args:
        remote_ip (str): Host (or user@host) to scp the manifest to.
        manifest_file (str): Local path of the manifest xml.

    Raises:
        subprocess.CalledProcessError: If scp exits non-zero.
    """
    print ("Copying manifest file {} to remote".format(manifest_file))
    # Pass an argument list with shell=False so a path or address containing
    # spaces/shell metacharacters cannot break or inject into the command.
    cmd = ["scp", manifest_file, "{}:/tmp/manifest.xml".format(remote_ip)]
    print ("Running command: {}".format(" ".join(cmd)))
    subprocess.check_call(cmd)
+    
+
def test_start_lp_remote(remote_ip):
    """Start a standby launchpad on a remote VM.

    Copies the newest local manifest to the remote host, launches rwmain
    there via ssh in a daemonized child process (the ssh command blocks for
    the lifetime of the remote launchpad), then waits for it to come up.

    Args:
        remote_ip (str): Address of the VM to start the launchpad on.
    """
    rift_root = os.environ.get('HOME_RIFT', os.environ.get('RIFT_ROOT'))
    rift_install = os.environ.get('RIFT_INSTALL')

    copy_manifest_to_remote(remote_ip, get_manifest_file())

    cmd_template = ("ssh_root {remote_ip} -q -o BatchMode=yes -o "
    " UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -- "
    " \"rm -rf /tmp/corosync; cd {rift_install}; {rift_root}/rift-shell -e -- {rift_install}/usr/bin/rwmain -m /tmp/manifest.xml\"").format(
      remote_ip=remote_ip,
      rift_root=rift_root,
      rift_install=rift_install)

    def start_lp(cmd):
        # Runs in the child process and blocks until the remote launchpad
        # exits; daemonizing the process keeps this test from blocking.
        print ("Running cmd: {}".format(cmd))
        subprocess.call(shlex.split(cmd))

    print ("Starting launchpad on remote VM: {}".format(cmd_template))
    launcher = multiprocessing.Process(target=start_lp, args=(cmd_template,))
    launcher.daemon = True
    launcher.start()
    print ("Standby system started")
    # Give the remote system time to boot before the failover test proceeds.
    time.sleep(60)
+
+
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Start standby LP')
    # required=True: fail immediately with a usage message instead of
    # passing remote_ip=None into the scp/ssh commands.
    parser.add_argument("--remote-ip", action="store", dest="remote_ip",
                        required=True)

    args = parser.parse_args()

    test_start_lp_remote(args.remote_ip)