update from RIFT as of 696b75d2fe9fb046261b08c616f1bcf6c0b54a9b second try

Signed-off-by: Jeremy Mordkoff <Jeremy.Mordkoff@riftio.com>
diff --git a/rwlaunchpad/ra/pytest/conftest.py b/rwlaunchpad/ra/pytest/conftest.py
index 80d739f..35a91d7 100644
--- a/rwlaunchpad/ra/pytest/conftest.py
+++ b/rwlaunchpad/ra/pytest/conftest.py
@@ -15,21 +15,61 @@
 #   limitations under the License.
 #
 
-import pytest
+import gi
+import itertools
+import logging
 import os
+import pytest
+import random
+import re
+import rwlogger
+import rw_peas
 import subprocess
 import sys
 
+import rift.auto.accounts
 import rift.auto.log
 import rift.auto.session
-import rift.vcs.vcs
 import rift.rwcal.openstack
-import logging
+import rift.vcs.vcs
 
-import gi
-gi.require_version('RwCloudYang', '1.0')
+from gi import require_version
+require_version('RwCloudYang', '1.0')
+require_version('RwTypes', '1.0')
+require_version('RwRbacPlatformYang', '1.0')
+require_version('RwUserYang', '1.0')
+require_version('RwProjectYang', '1.0')
+require_version('RwConmanYang', '1.0')
+require_version('RwRbacInternalYang', '1.0')
+require_version('RwRoAccountYang', '1.0')
 
-from gi.repository import RwCloudYang
+from gi.repository import (
+    RwCloudYang,
+    RwTypes,
+    RwUserYang,
+    RwProjectYang,
+    RwRbacPlatformYang,
+    RwConmanYang,
+    RwRbacInternalYang,
+    RwRoAccountYang
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+@pytest.fixture(scope='session')
+def use_accounts():
+    account_names = os.environ.get('RW_AUTO_ACCOUNTS')
+    if account_names:
+        return account_names.split(":")
+    return []
+
+@pytest.fixture(scope='session')
+def account_storage():
+    return rift.auto.accounts.Storage()
+
+@pytest.fixture(scope='session')
+def stored_accounts(account_storage):
+    return account_storage.list_cloud_accounts()
 
 @pytest.fixture(scope='session')
 def cloud_name_prefix():
@@ -37,9 +77,9 @@
     return 'cloud'
 
 @pytest.fixture(scope='session')
-def cloud_account_name(cloud_name_prefix):
+def cloud_account_name(cloud_account):
     '''fixture which returns the name used to identify the cloud account'''
-    return '{prefix}-0'.format(prefix=cloud_name_prefix)
+    return cloud_account.name
 
 @pytest.fixture(scope='session')
 def sdn_account_name():
@@ -47,6 +87,11 @@
     return 'sdn-0'
 
 @pytest.fixture(scope='session')
+def openstack_sdn_account_name():
+    '''fixture which returns the name used to identify the sdn account'''
+    return 'openstack-sdn-0'
+
+@pytest.fixture(scope='session')
 def sdn_account_type():
     '''fixture which returns the account type used by the sdn account'''
     return 'odl'
@@ -65,87 +110,405 @@
     Returns:
         xpath to be used when configure a cloud account
     '''
-    return '/cloud/account'
+    return '/rw-project:project[rw-project:name="default"]/cloud/account'
 
 @pytest.fixture(scope='session')
-def cloud_accounts(cloud_module, cloud_name_prefix, cloud_host, cloud_user, cloud_tenants, cloud_type):
+def cloud_accounts(request, cloud_module, cloud_name_prefix, cloud_host, cloud_user, cloud_tenants, cloud_type, stored_accounts, use_accounts, vim_host_override, vim_ssl_enabled, vim_user_domain_override, vim_project_domain_override, logger):
     '''fixture which returns a list of CloudAccounts. One per tenant provided
 
     Arguments:
-        cloud_module        - fixture: module defining cloud account
-        cloud_name_prefix   - fixture: name prefix used for cloud account
-        cloud_host          - fixture: cloud host address
-        cloud_user          - fixture: cloud account user key
-        cloud_tenants       - fixture: list of tenants to create cloud accounts on
-        cloud_type          - fixture: cloud account type
+        cloud_module                - fixture: module defining cloud account
+        cloud_name_prefix           - fixture: name prefix used for cloud account
+        cloud_host                  - fixture: cloud host address
+        cloud_user                  - fixture: cloud account user key
+        cloud_tenants               - fixture: list of tenants to create cloud accounts on
+        cloud_type                  - fixture: cloud account type
+        stored_accounts             - fixture: account storage
+        use_accounts                - fixture: use accounts from account storage
+        vim_host_override           - fixture: use specified vim instead of account's vim
+        vim_ssl_enabled             - fixture: enable or disable ssl regardless of accounts setting
+        vim_user_domain_override    - fixture: use specified user domain instead of account's user domain
+        vim_project_domain_override - fixture: use specified project domain instead of account's project domain
 
     Returns:
         A list of CloudAccounts
     '''
-    accounts = []
-    for idx, cloud_tenant in enumerate(cloud_tenants):
-        cloud_account_name = "{prefix}-{idx}".format(prefix=cloud_name_prefix, idx=idx)
 
-        if cloud_type == 'lxc':
-            accounts.append(
-                    cloud_module.CloudAccount.from_dict({
-                        "name": cloud_account_name,
-                        "account_type": "cloudsim_proxy"})
-            )
-        elif cloud_type == 'openstack':
-            password = 'mypasswd'
-            auth_url = 'http://{cloud_host}:5000/v3/'.format(cloud_host=cloud_host)
-            mgmt_network = os.getenv('MGMT_NETWORK', 'private')
-            accounts.append(
-                    cloud_module.CloudAccount.from_dict({
-                        'name':  cloud_account_name,
-                        'account_type': 'openstack',
-                        'openstack': {
-                            'admin': True,
-                            'key': cloud_user,
-                            'secret': password,
-                            'auth_url': auth_url,
-                            'tenant': cloud_tenant,
-                            'mgmt_network': mgmt_network}})
-            )
-        elif cloud_type == 'mock':
-            accounts.append(
-                    cloud_module.CloudAccount.from_dict({
-                        "name": cloud_account_name,
-                        "account_type": "mock"})
-            )
+
+    accounts = []
+
+    if use_accounts:
+        for account_name in stored_accounts:
+            if account_name in use_accounts:
+                if vim_host_override and stored_accounts[account_name].account_type == 'openstack':
+                    old_auth = stored_accounts[account_name].openstack.auth_url
+                    stored_accounts[account_name].openstack.auth_url = re.sub('(?:(?<=https://)|(?<=http://)).*?(?=:)', vim_host_override, old_auth)
+                if vim_ssl_enabled == False:
+                    stored_accounts[account_name].openstack.auth_url = re.sub(
+                        '^https',
+                        'http',
+                        stored_accounts[account_name].openstack.auth_url
+                    )
+                elif vim_ssl_enabled == True:
+                    stored_accounts[account_name].openstack.auth_url = re.sub(
+                        '^http(?=:)', 
+                        'https',
+                        stored_accounts[account_name].openstack.auth_url
+                    )
+                if vim_user_domain_override:
+                    stored_accounts[account_name].openstack.user_domain = vim_user_domain_override
+                if vim_project_domain_override:
+                    stored_accounts[account_name].openstack.project_domain = vim_project_domain_override
+                accounts.append(stored_accounts[account_name])
+    else:
+        def account_name_generator(prefix):
+            '''Generator of unique account names for a given prefix
+            Arguments:
+                prefix - prefix of account name
+            '''
+            idx=0
+            while True:
+                yield "{prefix}-{idx}".format(prefix=prefix, idx=idx)
+                idx+=1
+        name_gen = account_name_generator(cloud_name_prefix)
+
+        for cloud_tenant in cloud_tenants:
+            if cloud_type == 'lxc':
+                accounts.append(
+                        cloud_module.CloudAcc.from_dict({
+                            "name": next(name_gen),
+                            "account_type": "cloudsim_proxy"})
+                )
+            elif cloud_type == 'openstack':
+                hosts = [cloud_host]
+                if request.config.option.upload_images_multiple_accounts:
+                    hosts.append('10.66.4.32')
+                for host in hosts:
+                    password = 'mypasswd'
+                    auth_url = 'http://{host}:5000/v3/'.format(host=host)
+                    if vim_ssl_enabled == True:
+                        auth_url = 'https://{host}:5000/v3/'.format(host=host)
+                    mgmt_network = os.getenv('MGMT_NETWORK', 'private')
+                    accounts.append(
+                            cloud_module.YangData_RwProject_Project_Cloud_Account.from_dict({
+                                'name':  next(name_gen),
+                                'account_type': 'openstack',
+                                'openstack': {
+                                    'admin': True,
+                                    'key': cloud_user,
+                                    'secret': password,
+                                    'auth_url': auth_url,
+                                    'tenant': cloud_tenant,
+                                    'mgmt_network': mgmt_network,
+                                    'floating_ip_pool': 'public',
+                    }}))
+            elif cloud_type == 'mock':
+                accounts.append(
+                        cloud_module.CloudAcc.from_dict({
+                            "name": next(name_gen),
+                            "account_type": "mock"})
+                )
 
     return accounts
 
 
 @pytest.fixture(scope='session', autouse=True)
 def cloud_account(cloud_accounts):
-    '''fixture which returns an instance of CloudAccount
+    '''fixture which returns an instance of RwCloudYang.CloudAcc
 
     Arguments:
         cloud_accounts - fixture: list of generated cloud accounts
 
     Returns:
-        An instance of CloudAccount
+        An instance of RwCloudYang.CloudAcc
     '''
     return cloud_accounts[0]
 
 @pytest.fixture(scope='class')
-def openstack_client(cloud_host, cloud_user, cloud_tenant):
-    """Fixture which returns a session to openstack host.
+def vim_clients(cloud_accounts):
+    """Fixture which returns sessions to VIMs"""
+    vim_sessions = {}
+    for cloud_account in cloud_accounts:
+        if cloud_account.account_type == 'openstack':
+            vim_sessions[cloud_account.name] = rift.rwcal.openstack.OpenstackDriver(**{
+                'username': cloud_account.openstack.key,
+                'password': cloud_account.openstack.secret,
+                'auth_url': cloud_account.openstack.auth_url,
+                'project':  cloud_account.openstack.tenant,
+                'mgmt_network': cloud_account.openstack.mgmt_network,
+                'cert_validate': cloud_account.openstack.cert_validate,
+                'user_domain': cloud_account.openstack.user_domain,
+                'project_domain': cloud_account.openstack.project_domain,
+                'region': cloud_account.openstack.region
+            })
+            # Add initialization for other VIM types
+    return vim_sessions
 
-    Returns:
-        Session to an openstack host.
-    """
-    password = 'mypasswd'
-    auth_url = 'http://{cloud_host}:5000/v3/'.format(cloud_host=cloud_host)
-    mgmt_network = os.getenv('MGMT_NETWORK', 'private')
-    return rift.rwcal.openstack.OpenstackDriver(**{'username': cloud_user,
-                                                   'password': password,
-                                                   'auth_url': auth_url,
-                                                   'project' : cloud_tenant,
-                                                   'mgmt_network': mgmt_network,
-                                                   'cert_validate': False,
-                                                   'user_domain': 'Default',
-                                                   'project_domain':'Default',
-                                                   'region': 'RegionOne'})
+@pytest.fixture(scope='session')
+def openmano_prefix():
+    '''Fixture that returns the prefix to be used for openmano resource names'''
+    return 'openmano'
+
+@pytest.fixture(scope='session')
+def openmano_hosts(sut_host_names):
+    '''Fixture that returns the set of host logical names to be used for openmano'''
+    return [name for name in sut_host_names if 'openmano' in name]
+
+@pytest.fixture(scope='session')
+def openmano_accounts(openmano_hosts, sut_host_addrs, cloud_accounts, openmano_prefix, logger):
+    """Fixture that returns a list of Openmano accounts. One per host, and tenant provided"""
+    accounts=[]
+
+    if not openmano_hosts:
+        return accounts
+
+    host_cycle = itertools.cycle(openmano_hosts)
+    for cloud_account in cloud_accounts:
+        if cloud_account.account_type not in ['openstack']:
+            logger.warning('Skipping creating ro datacenter for cloud account [%s] - unsupported account type [%s]', cloud_account.name, cloud_account.account_type)
+            continue
+
+        try:
+            host = next(host_cycle)
+        except StopIteration:
+            break
+
+        if cloud_account.account_type == 'openstack':
+            accounts.append({
+                'account_name': "vim_%s" % cloud_account.name,
+                'openmano_tenant': host,
+                'openmano_addr': sut_host_addrs[host],
+                'openmano_port': 9090,
+                'datacenter': 'dc_%s' % (cloud_account.name),
+                'vim_account': cloud_account,
+                'vim_name': cloud_account.name,
+                'vim_type': cloud_account.account_type,
+                'vim_auth_url': cloud_account.openstack.auth_url, 
+                'vim_user':cloud_account.openstack.key,
+                'vim_password':cloud_account.openstack.secret,
+                'vim_tenant':cloud_account.openstack.tenant,
+            })
+
+    return accounts
+
+@pytest.fixture(scope='session')
+def ro_account_info(openmano_accounts):
+    ro_account_info = {}
+    for account in openmano_accounts:
+        ssh_cmd = (
+            'ssh {openmano_addr} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- '
+        ).format(
+            openmano_addr=account['openmano_addr']
+        )
+
+        if account['account_name'] not in ro_account_info:
+            tenant_create_cmd = (
+                '{ssh_cmd} openmano tenant-create {name}'
+            ).format(
+                ssh_cmd=ssh_cmd,
+                name=account['account_name']
+            )
+            tenant_info = subprocess.check_output(tenant_create_cmd, shell=True).decode('ascii')
+            (tenant_id, tenant_name) = tenant_info.split()
+            ro_account_info[account['account_name']] = {
+                'tenant_id':tenant_id,
+                'account': account,
+                'account_type':'openmano',
+                'host':account['openmano_addr'],
+                'port':9090,
+                'datacenters':[],
+            }
+        else:
+            tenant_id = ro_account_info[account['account_name']]['tenant_id']
+
+        datacenter_create_cmd = (
+            '{ssh_cmd} openmano datacenter-create --type {vim_type} {datacenter} {vim_auth_url}'
+        ).format(
+            ssh_cmd=ssh_cmd,
+            vim_type=account['vim_type'],
+            datacenter=account['datacenter'],
+            vim_auth_url=account['vim_auth_url']
+        )
+        datacenter_attach_cmd = (
+                '{ssh_cmd} OPENMANO_TENANT={tenant_id} openmano datacenter-attach {datacenter} --user={vim_user} '
+                '--password={vim_password} --vim-tenant-name={vim_tenant}'
+        ).format(
+            ssh_cmd=ssh_cmd,
+            tenant_id=tenant_id,
+            datacenter=account['datacenter'],
+            vim_user=account['vim_user'],
+            vim_password=account['vim_password'],
+            vim_tenant=account['vim_tenant']
+        )
+        subprocess.check_call(datacenter_create_cmd, shell=True)
+        subprocess.check_call(datacenter_attach_cmd, shell=True)
+
+        ro_account_info[account['account_name']]['datacenters'].append(account['datacenter'])
+    return ro_account_info
+
+
+@pytest.fixture(scope='session')
+def ro_accounts(ro_account_info):
+    '''Fixture that returns a map of RwRoAccountYang.ROAccount objects for each
+    account in ro_account_info
+    '''
+    ro_accounts = {}
+    for name, account_info in ro_account_info.items():
+        ro_accounts[name] = RwRoAccountYang.YangData_RwProject_Project_RoAccount_Account.from_dict({
+            'name':name,
+            'ro_account_type':account_info['account_type'],
+            'openmano':{
+                'host':account_info['host'],
+                'port':account_info['port'],
+                'tenant_id':account_info['tenant_id'],
+            }
+        })
+    return ro_accounts
+
+@pytest.fixture(scope='session')
+def ro_map(ro_account_info, ro_accounts):
+    '''Fixture that returns a map of vim name to datacenter / ro name tuples for each account in ro_account_info
+    '''
+    ro_map = {}
+    for account_name, account_info in ro_account_info.items():
+        vim_name = account_info['account']['vim_account'].name
+        datacenter_name = account_info['account']['datacenter']
+        ro_map[vim_name] = (account_name, datacenter_name)
+    return ro_map
+
+@pytest.fixture(scope='session')
+def cal(cloud_account):
+    """Fixture which returns cal interface"""
+    if cloud_account.account_type == 'openstack':
+        plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+    elif cloud_account.account_type == 'openvim':
+        plugin = rw_peas.PeasPlugin('rwcal_openmano_vimconnector', 'RwCal-1.0')
+    elif cloud_account.account_type == 'aws':
+        plugin = rw_peas.PeasPlugin('rwcal_aws', 'RwCal-1.0')
+    elif cloud_account.account_type == 'vsphere':
+        plugin = rw_peas.PeasPlugin('rwcal-python', 'RwCal-1.0')
+
+    engine, info, extension = plugin()
+    cal = plugin.get_interface("Cloud")
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+    rc = cal.init(rwloggerctx)
+    assert rc == RwTypes.RwStatus.SUCCESS
+
+    return cal
+
+@pytest.fixture(scope='session')
+def rbac_user_passwd():
+    """A common password being used for all rbac users."""
+    return 'mypasswd'
+
+@pytest.fixture(scope='session')
+def user_domain(tbac):
+    """user-domain being used in this rbac test."""
+    if tbac:
+        return 'tbacdomain'
+    return 'system'
+
+@pytest.fixture(scope='session')
+def platform_roles():
+    """Returns a tuple of platform roles"""
+    return ('rw-rbac-platform:platform-admin', 'rw-rbac-platform:platform-oper', 'rw-rbac-platform:super-admin')
+
+@pytest.fixture(scope='session')
+def user_roles():
+    """Returns a tuple of user roles"""
+    return ('rw-project:project-admin', 'rw-project:project-oper', 'rw-project-mano:catalog-oper', 'rw-project-mano:catalog-admin', 
+    'rw-project-mano:lcm-admin', 'rw-project-mano:lcm-oper', 'rw-project-mano:account-admin', 'rw-project-mano:account-oper',)
+
+@pytest.fixture(scope='session')
+def all_roles(platform_roles, user_roles):
+    """Returns a tuple of platform roles plus user roles"""
+    return platform_roles + user_roles
+
+@pytest.fixture(scope='session')
+def rw_user_proxy(mgmt_session):
+    return mgmt_session.proxy(RwUserYang)
+
+@pytest.fixture(scope='session')
+def rw_project_proxy(mgmt_session):
+    return mgmt_session.proxy(RwProjectYang)
+
+@pytest.fixture(scope='session')
+def rw_rbac_int_proxy(mgmt_session):
+    return mgmt_session.proxy(RwRbacInternalYang)
+
+@pytest.fixture(scope='session')
+def rw_ro_account_proxy(mgmt_session):
+    return mgmt_session.proxy(RwRoAccountYang)
+
+@pytest.fixture(scope='session')
+def rw_conman_proxy(mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+@pytest.fixture(scope='session')
+def rbac_platform_proxy(mgmt_session):
+    return mgmt_session.proxy(RwRbacPlatformYang)
+
+@pytest.fixture(scope='session')
+def project_keyed_xpath():
+    return '/project[name={project_name}]'
+
+@pytest.fixture(scope='session')
+def user_keyed_xpath():
+    return "/user-config/user[user-name={user}][user-domain={domain}]"
+
+@pytest.fixture(scope='session')
+def platform_config_keyed_xpath():
+    return "/rbac-platform-config/user[user-name={user}][user-domain={domain}]"
+
+@pytest.fixture(scope='session')
+def fmt_vnfd_catalog_xpath():
+    """Fixture that returns vnfd-catalog keyed xpath"""
+    xpath = '/project[name={project}]/vnfd-catalog'
+    return xpath
+
+@pytest.fixture(scope='session')
+def fmt_vnfd_id_xpath():
+    """Fixture that returns vnfd id xpath"""
+    xpath = '/rw-project:project[rw-project:name={project}]/project-vnfd:vnfd-catalog/project-vnfd:vnfd[project-vnfd:id={vnfd_id}]'
+    return xpath
+
+@pytest.fixture(scope='session')
+def fmt_nsd_catalog_xpath():
+    """Fixture that returns nsd-catalog keyed xpath"""
+    xpath = '/project[name={project}]/nsd-catalog'
+    return xpath
+
+@pytest.fixture(scope='session')
+def fmt_nsd_id_xpath():
+    """Fixture that returns nsd id xpath"""
+    xpath = '/rw-project:project[rw-project:name={project}]/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id={nsd_id}]'
+    return xpath
+
+@pytest.fixture(scope='session')
+def fmt_prefixed_cloud_xpath():
+    """Fixture that returns cloud keyed xpath"""
+    xpath = '/rw-project:project[rw-project:name={project}]/rw-cloud:cloud/rw-cloud:account[rw-cloud:name={account_name}]'
+    return xpath
+
+@pytest.fixture(scope='session')
+def fmt_cloud_xpath():
+    """Fixture that returns cloud keyed xpath without yang prefix"""
+    xpath = '/project[name={project}]/cloud/account[name={account_name}]'
+    return xpath
+
+@pytest.fixture(scope='session', autouse=True)
+def launchpad_glance_api_log():
+    log_file = os.path.join(
+        os.environ.get('HOME_RIFT', os.environ.get('RIFT_INSTALL')),
+        'var','rift','log','glance','glance-api.log'
+    )
+    return log_file
+
+@pytest.fixture(scope='session', autouse=True)
+def _glance_api_scraper_session(request, log_manager, confd_host, launchpad_glance_api_log):
+    '''Fixture which returns an instance of rift.auto.log.FileSource to scrape
+    the glance api logs of the launchpad host
+    '''
+    scraper = rift.auto.log.FileSource(host=confd_host, path=launchpad_glance_api_log)
+    log_manager.source(source=scraper)
+    return scraper
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py b/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py
index a3c565b..b8dcf6c 100644
--- a/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py
@@ -1,5 +1,5 @@
 
-# 
+#
 #   Copyright 2016 RIFT.IO Inc
 #
 #   Licensed under the Apache License, Version 2.0 (the "License");
@@ -23,13 +23,13 @@
 import tempfile
 
 from gi.repository import (
-    NsdYang,
+    ProjectNsdYang as NsdYang,
     NsrYang,
     RwNsrYang,
     RwVnfrYang,
     VnfrYang,
     VldYang,
-    RwVnfdYang,
+    RwProjectVnfdYang as RwVnfdYang,
     RwLaunchpadYang,
     RwBaseYang
 )
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py
index 557518b..69a9716 100755
--- a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py
@@ -22,6 +22,7 @@
 @brief Scriptable load-balancer test with multi-vm VNFs
 """
 
+import gi
 import json
 import logging
 import os
@@ -33,15 +34,17 @@
 import uuid
 
 from gi.repository import (
-    NsdYang,
+    RwProjectNsdYang,
     NsrYang,
     RwNsrYang,
     VnfrYang,
     VldYang,
-    RwVnfdYang,
+    RwProjectVnfdYang,
     RwLaunchpadYang,
     RwBaseYang
 )
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
 
 import rift.auto.mano
 
@@ -71,7 +74,7 @@
     Return:
          NSR object
     """
-    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+    nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
 
     nsr.id = str(uuid.uuid4())
     nsr.name = rift.auto.mano.resource_name(nsr.id)
@@ -80,7 +83,7 @@
     nsr.nsd_ref = nsd_id
     nsr.admin_status = "ENABLED"
     nsr.input_parameter.extend(input_param_list)
-    nsr.cloud_account = cloud_account_name
+    nsr.datacenter = cloud_account_name
 
     return nsr
 
@@ -103,10 +106,10 @@
     pass
 
 
-def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1"):
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1", project="default"):
     logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
     def check_status_onboard_status():
-        uri = 'http://%s:4567/api/upload/%s/state' % (host, transaction_id)
+        uri = 'http://%s:8008/api/operational/project/%s/create-jobs/job/%s' % (host, project, transaction_id)
         curl_cmd = 'curl --insecure {uri}'.format(
                 uri=uri
                 )
@@ -151,7 +154,7 @@
         trans_id = upload_descriptor(logger, trafgen_vnfd_package_file, launchpad_host)
         wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
 
-        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
         vnfds = catalog.vnfd
         assert len(vnfds) == 1, "There should only be a single vnfd"
         vnfd = vnfds[0]
@@ -163,7 +166,7 @@
         trans_id = upload_descriptor(logger, trafsink_vnfd_package_file, launchpad_host)
         wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
 
-        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
         vnfds = catalog.vnfd
         assert len(vnfds) == 2, "There should be two vnfds"
         assert "multivm_trafsink_vnfd" in [vnfds[0].name, vnfds[1].name]
@@ -174,7 +177,7 @@
         trans_id = upload_descriptor(logger, slb_vnfd_package_file, launchpad_host)
         wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
 
-        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
         vnfds = catalog.vnfd
         assert len(vnfds) == 3, "There should be two vnfds"
         assert "multivm_slb_vnfd" in [vnfds[0].name, vnfds[1].name]
@@ -184,7 +187,7 @@
         trans_id = upload_descriptor(logger, multi_vm_vnf_nsd_package_file, launchpad_host)
         wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
 
-        catalog = nsd_proxy.get_config('/nsd-catalog')
+        catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
         nsds = catalog.nsd
         assert len(nsds) == 1, "There should only be a single nsd"
         nsd = nsds[0]
@@ -206,15 +209,15 @@
                                                                            config_param.value,
                                                                            running_nsr_config.input_parameter))
 
-        catalog = nsd_proxy.get_config('/nsd-catalog')
+        catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
         nsd = catalog.nsd[0]
 
         input_parameters = []
-        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+        descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id=%s]/project-nsd:description" % quoted_key(nsd.id)
         descr_value = "New NSD Description"
         in_param_id = str(uuid.uuid4())
 
-        input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+        input_param_1= NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
                                                                 xpath=descr_xpath,
                                                                 value=descr_value)
 
@@ -223,20 +226,20 @@
         nsr = create_nsr(nsd.id, input_parameters, cloud_account_name)
 
         logger.info("Instantiating the Network Service")
-        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+        rwnsr_proxy.create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
 
-        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
         nsrs = nsr_opdata.nsr
 
         # Verify the input parameter configuration
-        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        running_config = rwnsr_proxy.get_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id=%s]" % quoted_key(nsr.id))
         for input_param in input_parameters:
             verify_input_parameters(running_config, input_param)
 
         assert len(nsrs) == 1
         assert nsrs[0].ns_instance_config_ref == nsr.id
 
-        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.id)
+        xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(quoted_key(nsr.id))
         rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=360)
 
 
@@ -254,11 +257,11 @@
         """
         logger.debug("Terminating Multi VM VNF's NSR")
 
-        nsr_path = "/ns-instance-config"
+        nsr_path = "/rw-project:project[rw-project:name='default']/ns-instance-config"
         nsr = rwnsr_proxy.get_config(nsr_path)
 
         ping_pong = nsr.nsr[0]
-        rwnsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(ping_pong.id))
+        rwnsr_proxy.delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(ping_pong.id)))
         time.sleep(30)
 
 
@@ -268,19 +271,19 @@
         Asserts:
             The records are deleted.
         """
-        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
         for nsd in nsds.nsd:
-            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(nsd.id))
             nsd_proxy.delete_config(xpath)
 
-        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
         for vnfd_record in vnfds.vnfd:
-            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
             vnfd_proxy.delete_config(xpath)
 
         time.sleep(5)
-        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
         assert nsds is None or len(nsds.nsd) == 0
 
-        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
         assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
index ca6e9b5..4c030d7 100755
--- a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
@@ -22,6 +22,7 @@
 @brief Scriptable load-balancer test with multi-vm VNFs
 """
 
+import gi
 import json
 import logging
 import os
@@ -33,15 +34,17 @@
 import uuid
 
 from gi.repository import (
-    NsdYang,
+    RwProjectNsdYang,
     NsrYang,
     RwNsrYang,
     VnfrYang,
     VldYang,
-    RwVnfdYang,
+    RwProjectVnfdYang,
     RwLaunchpadYang,
     RwBaseYang
 )
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
 
 import rift.auto.mano
 
@@ -78,7 +81,7 @@
     Return:
          NSR object
     """
-    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+    nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
 
     nsr.id = str(uuid.uuid4())
     nsr.name = rift.auto.mano.resource_name(nsr.id)
@@ -87,7 +90,7 @@
     nsr.nsd_ref = nsd_id
     nsr.admin_status = "ENABLED"
     nsr.input_parameter.extend(input_param_list)
-    nsr.cloud_account = cloud_account_name
+    nsr.datacenter = cloud_account_name
 
     return nsr
 
@@ -110,10 +113,10 @@
     pass
 
 
-def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1"):
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1", project="default"):
     logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
     def check_status_onboard_status():
-        uri = 'http://%s:4567/api/upload/%s/state' % (host, transaction_id)
+        uri = 'http://%s:8008/api/operational/project/%s/create-jobs/job/%s' % (host, project, transaction_id)
         curl_cmd = 'curl --insecure {uri}'.format(
                 uri=uri
                 )
@@ -158,7 +161,7 @@
         trans_id = upload_descriptor(logger, trafgen_vnfd_package_file, launchpad_host)
         wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
 
-        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
         vnfds = catalog.vnfd
         assert len(vnfds) == 1, "There should only be a single vnfd"
         vnfd = vnfds[0]
@@ -170,7 +173,7 @@
         trans_id = upload_descriptor(logger, trafsink_vnfd_package_file, launchpad_host)
         wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
 
-        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
         vnfds = catalog.vnfd
         assert len(vnfds) == 2, "There should be two vnfds"
         assert "multivm_trafsink_vnfd" in [vnfds[0].name, vnfds[1].name]
@@ -180,7 +183,7 @@
         trans_id = upload_descriptor(logger, multi_vm_vnf_nsd_package_file, launchpad_host)
         wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
 
-        catalog = nsd_proxy.get_config('/nsd-catalog')
+        catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
         nsds = catalog.nsd
         assert len(nsds) == 1, "There should only be a single nsd"
         nsd = nsds[0]
@@ -202,15 +205,15 @@
                                                                            config_param.value,
                                                                            running_nsr_config.input_parameter))
 
-        catalog = nsd_proxy.get_config('/nsd-catalog')
+        catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
         nsd = catalog.nsd[0]
 
         input_parameters = []
-        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+        descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id=%s]/project-nsd:description" % quoted_key(nsd.id)
         descr_value = "New NSD Description"
         in_param_id = str(uuid.uuid4())
 
-        input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+        input_param_1= NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
                                                                 xpath=descr_xpath,
                                                                 value=descr_value)
 
@@ -219,20 +222,20 @@
         nsr = create_nsr(nsd.id, input_parameters, cloud_account_name)
 
         logger.info("Instantiating the Network Service")
-        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+        rwnsr_proxy.create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
 
-        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
         nsrs = nsr_opdata.nsr
 
         # Verify the input parameter configuration
-        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        running_config = rwnsr_proxy.get_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id=%s]" % quoted_key(nsr.id))
         for input_param in input_parameters:
             verify_input_parameters(running_config, input_param)
 
         assert len(nsrs) == 1
         assert nsrs[0].ns_instance_config_ref == nsr.id
 
-        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.id)
+        xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(quoted_key(nsr.id))
         rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=360)
 
 
@@ -250,11 +253,11 @@
         """
         logger.debug("Terminating Multi VM VNF's NSR")
 
-        nsr_path = "/ns-instance-config"
+        nsr_path = "/rw-project:project[rw-project:name='default']/ns-instance-config"
         nsr = rwnsr_proxy.get_config(nsr_path)
 
         ping_pong = nsr.nsr[0]
-        rwnsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(ping_pong.id))
+        rwnsr_proxy.delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(ping_pong.id)))
         time.sleep(30)
 
 
@@ -264,19 +267,19 @@
         Asserts:
             The records are deleted.
         """
-        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
         for nsd in nsds.nsd:
-            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(nsd.id))
             nsd_proxy.delete_config(xpath)
 
-        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
         for vnfd_record in vnfds.vnfd:
-            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
             vnfd_proxy.delete_config(xpath)
 
         time.sleep(5)
-        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
         assert nsds is None or len(nsds.nsd) == 0
 
-        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
         assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py
index 197e95c..958df6e 100644
--- a/rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py
@@ -22,6 +22,7 @@
 @brief Scriptable load-balancer test with multi-vm VNFs
 """
 
+import gi
 import ipaddress
 import pytest
 import re
@@ -37,11 +38,13 @@
     RwVnfBaseConfigYang,
     RwTrafgenYang
 )
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
 
 
 @pytest.fixture(scope='session')
 def trafgen_vnfr(request, rwvnfr_proxy, session_type):
-    vnfr = "/vnfr-catalog/vnfr"
+    vnfr = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr"
     vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
     for vnfr in vnfrs.vnfr:
         if 'trafgen' in vnfr.short_name:
@@ -94,7 +97,7 @@
     Arguments:
         vnf_name - vnf name of configuration
     '''
-    xpath = "/vnf-config/vnf[name='%s'][instance='0']" % vnf_name
+    xpath = "/rw-project:project[rw-project:name='default']/vnf-config/vnf[name=%s][instance='0']" % quoted_key(vnf_name)
     for _ in range(24):
         tg_config = tgcfg_proxy.get_config(xpath)
         if tg_config is not None:
@@ -154,8 +157,8 @@
         '''
         return (int(current_sample) - int(previous_sample)) > threshold
 
-    xpath = "/vnf-opdata/vnf[name='{}'][instance='0']/port-state[portname='{}']/counters/{}"
-    vnfdata_proxy.wait_for_interval(xpath.format(vnf_name, port_name, 'input-packets'),
+    xpath = "/rw-project:project[rw-project:name='default']/vnf-opdata/vnf[name={}][instance='0']/port-state[portname={}]/counters/{}"
+    vnfdata_proxy.wait_for_interval(xpath.format(quoted_key(vnf_name), quoted_key(port_name), quoted_key('input-packets')),
                                     value_incremented, timeout=timeout, interval=interval)
 
 
@@ -178,8 +181,8 @@
         '''
         return (int(current_sample) - int(previous_sample)) < threshold
 
-    xpath = "/vnf-opdata/vnf[name='{}'][instance='0']/port-state[portname='{}']/counters/{}"
-    vnfdata_proxy.wait_for_interval(xpath.format(vnf_name, port_name, 'input-packets'), value_unchanged, timeout=timeout, interval=interval)
+    xpath = "/rw-project:project[rw-project:name='default']/vnf-opdata/vnf[name={}][instance='0']/port-state[portname={}]/counters/{}"
+    vnfdata_proxy.wait_for_interval(xpath.format(quoted_key(vnf_name), quoted_key(port_name), quoted_key('input-packets')), value_unchanged, timeout=timeout, interval=interval)
 
 @pytest.mark.depends('multivmvnf')
 @pytest.mark.incremental
diff --git a/rwlaunchpad/ra/pytest/ns/conftest.py b/rwlaunchpad/ra/pytest/ns/conftest.py
index a1fa446..a2a9434 100644
--- a/rwlaunchpad/ra/pytest/ns/conftest.py
+++ b/rwlaunchpad/ra/pytest/ns/conftest.py
@@ -22,19 +22,41 @@
 import tempfile
 import shutil
 import subprocess
+import random
 
 import gi
+import rift.auto.descriptor
 import rift.auto.session
-import rift.mano.examples.ping_pong_nsd as ping_pong
+import rift.mano.examples.ping_pong_nsd as ping_pong_example
 import rift.vcs.vcs
 
 class PackageError(Exception):
     pass
 
 @pytest.fixture(scope='session', autouse=True)
-def cloud_account_name(request):
-    '''fixture which returns the name used to identify the cloud account'''
-    return 'cloud-0'
+def multidisk_testdata(request, descriptor_images, path_ping_image, path_pong_image):
+    """fixture which returns test data related to multidisk test"""
+
+    if not request.config.option.multidisk:
+        return None
+
+    iso_img, qcow2_img = [os.path.basename(image) for image in descriptor_images]
+    
+    ping_ = {'vda': ['disk', 'virtio', 5, os.path.basename(path_ping_image), 0],
+             'sda': ['cdrom', 'scsi', 5, iso_img, 1],
+             'hda': ['disk', 'ide', 5, None, None],
+             }
+
+    pong_ = {'vda': ['disk', 'virtio', 5, os.path.basename(path_pong_image), 0],
+             'hda': ['cdrom', 'ide', 5, iso_img, 1],
+             'hdb': ['disk', 'ide', 5, qcow2_img, 2],
+             }
+    return ping_, pong_
+
+@pytest.fixture(scope='session')
+def port_sequencing_intf_positions():
+    """fixture which returns a list of ordered positions for pong interfaces related to port sequencing test"""
+    return random.sample(range(1, 2**32-1), 3)
 
 @pytest.fixture(scope='session')
 def ping_pong_install_dir():
@@ -116,6 +138,11 @@
     return image_dirs
 
 @pytest.fixture(scope='session')
+def random_image_name(image_dirs):
+    """Fixture which returns a random image name"""
+    return 'image_systemtest_{}.qcow2'.format(random.randint(100, 9999))
+
+@pytest.fixture(scope='session')
 def image_paths(image_dirs):
     ''' Fixture containing a mapping of image names to their path images
 
@@ -147,12 +174,92 @@
     '''
     return image_paths["Fedora-x86_64-20-20131211.1-sda-pong.qcow2"]
 
+@pytest.fixture(scope='session')
+def rsyslog_userdata(rsyslog_host, rsyslog_port):
+    ''' Fixture providing rsyslog user data
+    Arguments:
+        rsyslog_host - host of the rsyslog process
+        rsyslog_port - port of the rsyslog process
+    '''
+    if rsyslog_host and rsyslog_port:
+        return '''
+rsyslog:
+  - "$ActionForwardDefaultTemplate RSYSLOG_ForwardFormat"
+  - "*.* @{host}:{port}"
+        '''.format(
+            host=rsyslog_host,
+            port=rsyslog_port,
+        )
+
+    return None
+
+@pytest.fixture(scope='session')
+def descriptors_pingpong_vnf_input_params():
+    return ping_pong_example.generate_ping_pong_descriptors(
+        pingcount=1,
+        nsd_name='pp_input_nsd',
+        vnfd_input_params=True,
+    )
+
+@pytest.fixture(scope='session')
+def packages_pingpong_vnf_input_params(descriptors_pingpong_vnf_input_params):
+    return rift.auto.descriptor.generate_descriptor_packages(descriptors_pingpong_vnf_input_params)
+
+@pytest.fixture(scope='session')
+def ping_script_userdata():
+    userdata = '''#cloud-config
+password: fedora
+chpasswd: { expire: False }
+ssh_pwauth: True
+runcmd:
+  - [ systemctl, daemon-reload ]
+  - [ systemctl, enable, {{ CI-script-init-data }}.service ]
+  - [ systemctl, start, --no-block, {{ CI-script-init-data }}.service ]
+  - [ ifup, eth1 ]
+'''
+    return userdata
+
+@pytest.fixture(scope='session')
+def pong_script_userdata():
+    userdata = '''#!/bin/bash
+sed 's/^.*PasswordAuthentication.*$/PasswordAuthentication yes/' < /etc/ssh/sshd_config > /etc/ssh/sshd_config
+systemctl daemon-reload
+systemctl enable {{ CI-script-init-data }}.service
+systemctl start --no-block {{ CI-script-init-data }}.service
+ifup eth1
+'''
+    return userdata
+
+@pytest.fixture(scope='session')
+def descriptors_pingpong_script_input_params(ping_script_userdata, pong_script_userdata):
+    return ping_pong_example.generate_ping_pong_descriptors(
+            pingcount=1,
+            nsd_name='pp_script_nsd',
+            script_input_params=True,
+            ping_userdata=ping_script_userdata,
+            pong_userdata=pong_script_userdata,
+    )
+
+@pytest.fixture(scope='session')
+def packages_pingpong_script_input_params(descriptors_pingpong_script_input_params):
+    return rift.auto.descriptor.generate_descriptor_packages(descriptors_pingpong_script_input_params)
+
 class PingPongFactory:
-    def __init__(self, path_ping_image, path_pong_image, rsyslog_host, rsyslog_port):
+    def __init__(self, path_ping_image, path_pong_image, static_ip, vnf_dependencies, rsyslog_userdata, port_security, metadata_vdud, multidisk, ipv6, port_sequencing, service_primitive):
+
         self.path_ping_image = path_ping_image
         self.path_pong_image = path_pong_image
-        self.rsyslog_host = rsyslog_host
-        self.rsyslog_port = rsyslog_port
+        self.rsyslog_userdata = rsyslog_userdata
+        self.static_ip = static_ip
+        self.service_primitive = service_primitive
+        self.use_vca_conf = vnf_dependencies
+        self.port_security = port_security
+        self.port_sequencing = port_sequencing
+        self.metadata_vdud = metadata_vdud
+        self.multidisk = multidisk
+        self.ipv6 = ipv6
+        if not port_security:
+            self.port_security = None   # Not to disable port security if its not specific to --port-security feature.
 
     def generate_descriptors(self):
         '''Return a new set of ping and pong descriptors
@@ -167,32 +274,29 @@
         ping_md5sum = md5sum(self.path_ping_image)
         pong_md5sum = md5sum(self.path_pong_image)
 
-        ex_userdata = None
-        if self.rsyslog_host and self.rsyslog_port:
-            ex_userdata = '''
-rsyslog:
-  - "$ActionForwardDefaultTemplate RSYSLOG_ForwardFormat"
-  - "*.* @{host}:{port}"
-            '''.format(
-                host=self.rsyslog_host,
-                port=self.rsyslog_port,
-            )
-
-        descriptors = ping_pong.generate_ping_pong_descriptors(
+        descriptors = ping_pong_example.generate_ping_pong_descriptors(
                 pingcount=1,
                 ping_md5sum=ping_md5sum,
                 pong_md5sum=pong_md5sum,
-                ex_ping_userdata=ex_userdata,
-                ex_pong_userdata=ex_userdata,
+                ex_ping_userdata=self.rsyslog_userdata,
+                ex_pong_userdata=self.rsyslog_userdata,
+                use_static_ip=self.static_ip,
+                port_security=self.port_security,
+                explicit_port_seq=self.port_sequencing,
+                metadata_vdud=self.metadata_vdud,
+                use_vca_conf=self.use_vca_conf,
+                multidisk=self.multidisk,
+                use_ipv6=self.ipv6,
+                primitive_test=self.service_primitive,
         )
 
         return descriptors
 
 @pytest.fixture(scope='session')
-def ping_pong_factory(path_ping_image, path_pong_image, rsyslog_host, rsyslog_port):
+def ping_pong_factory(path_ping_image, path_pong_image, static_ip, vnf_dependencies, rsyslog_userdata, port_security, metadata_vdud, multidisk_testdata, ipv6, port_sequencing, service_primitive):
     '''Fixture returns a factory capable of generating ping and pong descriptors
     '''
-    return PingPongFactory(path_ping_image, path_pong_image, rsyslog_host, rsyslog_port)
+    return PingPongFactory(path_ping_image, path_pong_image, static_ip, vnf_dependencies, rsyslog_userdata, port_security, metadata_vdud, multidisk_testdata, ipv6, port_sequencing, service_primitive)
 
 @pytest.fixture(scope='session')
 def ping_pong_records(ping_pong_factory):
@@ -202,7 +306,7 @@
 
 
 @pytest.fixture(scope='session')
-def descriptors(request, ping_pong_records):
+def descriptors(request, ping_pong_records, random_image_name):
     def pingpong_descriptors(with_images=True):
         """Generated the VNFDs & NSD files for pingpong NS.
 
@@ -232,8 +336,7 @@
                         'images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2')
 
         for descriptor in [ping_vnfd, pong_vnfd, ping_pong_nsd]:
-            descriptor.write_to_file(output_format='xml', outdir=tmpdir)
-
+            descriptor.write_to_file(output_format='yaml', outdir=tmpdir)
         ping_img_path = os.path.join(tmpdir, "{}/images/".format(ping_vnfd.name))
         pong_img_path = os.path.join(tmpdir, "{}/images/".format(pong_vnfd.name))
 
@@ -243,9 +346,13 @@
             shutil.copy(ping_img, ping_img_path)
             shutil.copy(pong_img, pong_img_path)
 
+        if request.config.option.upload_images_multiple_accounts:
+            with open(os.path.join(ping_img_path, random_image_name), 'wb') as image_bin_file:
+                image_bin_file.seek(1024*1024*512)  # image file of size 512 MB
+                image_bin_file.write(b'0')
+
         for dir_name in [ping_vnfd.name, pong_vnfd.name, ping_pong_nsd.name]:
             subprocess.call([
-                    "sh",
                     "{rift_install}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh".format(rift_install=os.environ['RIFT_INSTALL']),
                     tmpdir,
                     dir_name])
@@ -266,8 +373,43 @@
 
         return files
 
+    def l2portchain_descriptors():
+        """L2  port chaining packages"""
+        files = [
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_dpi_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_firewall_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_nat_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_pgw_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_router_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_sff_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_demo_nsd.tar.gz")
+            ]
+
+        return files
+
+    def metadata_vdud_cfgfile_descriptors():
+        """Metadata-vdud feature related packages"""
+        files = [
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/cfgfile/cirros_cfgfile_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/cfgfile/fedora_cfgfile_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/cfgfile/ubuntu_cfgfile_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/cfgfile/cfgfile_nsd.tar.gz")
+            ]
+
+        return files
+        
+    if request.config.option.vnf_onboard_delete:
+        return haproxy_descriptors() + l2portchain_descriptors() + list(pingpong_descriptors())
+    if request.config.option.multiple_ns_instantiate:
+        return haproxy_descriptors() + metadata_vdud_cfgfile_descriptors() + list(pingpong_descriptors())
+    if request.config.option.l2_port_chaining:
+        return l2portchain_descriptors()
+    if request.config.option.metadata_vdud_cfgfile:
+        return metadata_vdud_cfgfile_descriptors()
     if request.config.option.network_service == "pingpong":
         return pingpong_descriptors()
+    elif request.config.option.ha_multiple_failovers:
+        return {'pingpong': pingpong_descriptors(), 'haproxy': haproxy_descriptors(), 'vdud_cfgfile': metadata_vdud_cfgfile_descriptors()}
     elif request.config.option.network_service == "pingpong_noimg":
         return pingpong_descriptors(with_images=False)
     elif request.config.option.network_service == "haproxy":
@@ -286,7 +428,37 @@
 
         return images
 
+    def l2portchain_images():
+        """HAProxy images."""
+        images = [os.path.join(os.getenv('RIFT_ROOT'), "images/ubuntu_trusty_1404.qcow2")]
+        return images
+
+    def multidisk_images():
+        images = [
+            os.path.join(os.getenv('RIFT_ROOT'), 'images/ubuntu-16.04-mini-64.iso'),
+            os.path.join(os.getenv('RIFT_ROOT'), "images/ubuntu_trusty_1404.qcow2"),
+            ]
+        return images
+
+    def metadata_vdud_cfgfile_images():
+        """Metadata-vdud feature related images."""
+        images = [
+            os.path.join(os.getenv('RIFT_ROOT'), "images/cirros-0.3.4-x86_64-disk.img"),
+            os.path.join(os.getenv('RIFT_ROOT'), "images/Fedora-x86_64-20-20131211.1-sda.qcow2"),
+            os.path.join(os.getenv('RIFT_ROOT'), "images/UbuntuXenial")
+            ]
+
+        return images
+
+    if request.config.option.l2_port_chaining:
+        return l2portchain_images()
+    if request.config.option.multidisk:
+        return multidisk_images()
+    if request.config.option.metadata_vdud_cfgfile:
+        return metadata_vdud_cfgfile_images()
     if request.config.option.network_service == "haproxy":
         return haproxy_images()
+    if request.config.option.multiple_ns_instantiate:
+        return haproxy_images() + metadata_vdud_cfgfile_images()
 
     return []
diff --git a/rwlaunchpad/ra/pytest/ns/gui_tests/conftest.py b/rwlaunchpad/ra/pytest/ns/gui_tests/conftest.py
new file mode 100755
index 0000000..77261e9
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/gui_tests/conftest.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import gi
+import pytest
+import os
+from pyvirtualdisplay import Display
+from selenium import webdriver
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.by import By
+
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwConfigAgentYang', '1.0')
+gi.require_version('RwSdnYang', '1.0')
+
+from gi.repository import (
+    RwSdnYang,
+    RwCloudYang,
+    RwConfigAgentYang,
+)
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+@pytest.fixture(scope='session')
+def cloud_proxy(mgmt_session):
+    """cloud_proxy."""
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='session')
+def sdn_proxy(mgmt_session):
+    """sdn_proxy."""
+    return mgmt_session.proxy(RwSdnYang)
+
+
+@pytest.fixture(scope='session')
+def config_agent_proxy(mgmt_session):
+    """config_agent_proxy."""
+    return mgmt_session.proxy(RwConfigAgentYang)
+
+
+@pytest.fixture(scope='session')
+def driver(request, confd_host, logger):
+    """Set up virtual diplay and browser driver."""
+    # Set up the virtual display
+    display = Display(visible=0, size=(1024, 768))
+    display.start()
+
+    logger.info("Initializing the chrome web driver")
+    root_dir = os.environ.get('RIFT_ROOT')
+    webdriver_path = '{}/chromedriver'.format(root_dir)
+    # webdriver_path = os.environ["webdriver.chrome.driver"]
+    # Something like this should be implemented.
+
+    driver_ = webdriver.Chrome(executable_path=webdriver_path)
+    driver_.implicitly_wait(5)
+    url = "http://{}:8000/".format(confd_host)
+    logger.info("Getting the URL {}".format(url))
+    driver_.get(url)
+    WebDriverWait(driver_, 10).until(
+        EC.presence_of_element_located((By.CLASS_NAME, "logo"))
+    )
+
+    logger.info("Signing into the Rift home page")
+    driver_.find_element_by_name("username").send_keys("admin")
+    driver_.find_element_by_name("password").send_keys("admin")
+    driver_.find_element_by_id("submit").click()
+    WebDriverWait(driver_, 10).until(
+        EC.presence_of_element_located((By.CLASS_NAME, "skyquakeNav"))
+    )
+
+    def teardown():
+        driver_.quit()
+        display.stop()
+
+    yield driver_
diff --git a/rwlaunchpad/ra/pytest/ns/gui_tests/test_launchpad_ui.py b/rwlaunchpad/ra/pytest/ns/gui_tests/test_launchpad_ui.py
new file mode 100755
index 0000000..dd4e32e
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/gui_tests/test_launchpad_ui.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+#
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import gi
+
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.by import By
+
+
+gi.require_version('RwUserYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+
+from gi.repository import (
+    RwUserYang,
+    RwProjectYang,
+    RwConmanYang
+)
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+class TestGUI(object):
+    """TestGUI."""
+
+    def click_element_and_wait(self, driver, key_word, wait=True):
+        """Click and wait for that element to appear."""
+        path = "//a[text()={}]".format(quoted_key(key_word))
+        driver.find_element_by_xpath(path).click()
+        if wait is True:
+            WebDriverWait(driver, 10).until(
+                EC.presence_of_element_located((
+                    By.XPATH, path)))
+
+    def click_button(self, driver, key_word):
+        """Click a button."""
+        path = "//div[text()={}]".format(quoted_key(key_word))
+        driver.find_element_by_xpath(path).click()
+
+    def input_value(self, driver, data_reactid, value):
+        """Input values to field."""
+        path = "//input[@data-reactid={}]".format(quoted_key(data_reactid))
+        driver.find_element_by_xpath(path).send_keys(value)
+
+    def test_basic_checks(
+            self, driver, logger, rw_project_proxy, rw_user_proxy):
+        """test_basic_checks."""
+        logger.debug('Check access to all basic pages.')
+        basic_pages = (
+            ['Accounts', 'Catalog', 'Launchpad', 'ADMINISTRATION',
+             'PROJECT: default', 'admin'])
+        for key_word in basic_pages:
+            self.click_element_and_wait(driver, key_word)
+
+        logger.debug('Create a test project.')
+        self.click_element_and_wait(driver, 'ADMINISTRATION')
+        self.click_element_and_wait(driver, 'Project Management', wait=False)
+        self.click_button(driver, 'Add Project')
+        self.input_value(driver, '.0.4.0.1.0.4.0.0.1.0.1', 'test_project')
+        self.click_button(driver, 'Create')
+
+        logger.debug('Verify test project is created in ui.')
+        path = "//div[text()={}]".format(quoted_key('test_project'))
+        WebDriverWait(driver, 10).until(
+            EC.presence_of_element_located((
+                By.XPATH, path)))
+
+        logger.debug('Verify test project is created in config.')
+        project_cm_config_xpath = '/project[name={}]/project-state'
+        project_ = rw_project_proxy.get_config(
+            project_cm_config_xpath.format(
+                quoted_key('test_project')), list_obj=True)
+        assert project_
+
+        logger.debug('Create a test user.')
+        self.click_element_and_wait(driver, 'ADMINISTRATION')
+        self.click_element_and_wait(driver, 'User Management', wait=False)
+        self.click_button(driver, 'Add User')
+        self.input_value(driver, '.0.4.0.1.1.0.4.0.0.1.0.1', 'test_user')
+        self.input_value(driver, '.0.4.0.1.1.0.4.0.3.1.0.1', 'mypasswd')
+        self.input_value(driver, '.0.4.0.1.1.0.4.0.3.1.1.1', 'mypasswd')
+        self.click_button(driver, 'Create')
+
+        logger.debug('Verify test user is created in ui.')
+        path = "//div[text()={}]".format(quoted_key('test_user'))
+        WebDriverWait(driver, 10).until(
+            EC.presence_of_element_located((
+                By.XPATH, path)))
+
+        logger.debug('Verify test user is created in config.')
+        user_config_xpath = (
+            '/user-config/user[user-name={user_name}][user-domain={domain}]')
+        user_ = rw_user_proxy.get_config(
+            user_config_xpath.format(
+                user_name=quoted_key('test_user'),
+                domain=quoted_key('system')))
+        assert user_
diff --git a/rwlaunchpad/ra/pytest/ns/ha/conftest.py b/rwlaunchpad/ra/pytest/ns/ha/conftest.py
new file mode 100644
index 0000000..973f447
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/ha/conftest.py
@@ -0,0 +1,182 @@
+#!/usr/bin/env python3
+"""
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+"""
+
+import pytest
+import subprocess
+import os
+import time
+
+import rift.vcs.vcs
+import rift.auto.mano as mano
+
+from gi.repository import (
+    RwConmanYang,
+    RwUserYang,
+    RwProjectYang,
+    RwRbacInternalYang,
+    RwRbacPlatformYang,
+    RwCloudYang,
+)
+
+@pytest.fixture(scope='session')
+def ha_mgmt_sessions(sut_host_addrs, session_type):
+    """Fixture that returns mgmt sessions for active, standby LPs"""
+    sessions = {}
+    for name,addr in sut_host_addrs.items():
+        if session_type == 'netconf':
+            mgmt_session = rift.auto.session.NetconfSession(host=addr)
+        elif session_type == 'restconf':
+            mgmt_session = rift.auto.session.RestconfSession(host=addr)
+
+        if 'standby' in name:
+            sessions['standby'] = mgmt_session
+        elif 'active' in name:
+            sessions['active'] = mgmt_session
+            mgmt_session.connect()
+            rift.vcs.vcs.wait_until_system_started(mgmt_session)
+
+    return sessions
+
+@pytest.fixture(scope='session')
+def active_mgmt_session(ha_mgmt_sessions):
+    """Fixture that returns mgmt sessions for active LP"""
+    return ha_mgmt_sessions['active']
+
+@pytest.fixture(scope='session')
+def standby_mgmt_session(ha_mgmt_sessions):
+    """Fixture that returns mgmt sessions for standby LP"""
+    return ha_mgmt_sessions['standby']
+
+@pytest.fixture(scope='session')
+def active_confd_host(active_mgmt_session):
+    """Fixture that returns mgmt sessions for active LP"""
+    return active_mgmt_session.host
+
+@pytest.fixture(scope='session')
+def standby_confd_host(standby_mgmt_session):
+    """Fixture that returns mgmt sessions for standby LP"""
+    return standby_mgmt_session.host
+
+@pytest.fixture(scope='session')
+def revertive_pref_host(active_mgmt_session):
+    """Fixture that returns mgmt sessions for active LP"""
+    return active_mgmt_session.host
+
+@pytest.fixture(scope='session')
+def active_site_name(active_mgmt_session):
+    """Fixture that returns mgmt sessions for active LP"""
+    return 'site-a'
+
+@pytest.fixture(scope='session')
+def standby_site_name(standby_mgmt_session):
+    """Fixture that returns mgmt sessions for standby LP"""
+    return 'site-b'
+
+@pytest.fixture(scope='session', autouse=True)
+def redundancy_config_setup(logger, active_confd_host, standby_confd_host, active_mgmt_session):
+    """Fixture that prepares the rw-redundancy-config.xml file and copies it to RVR of active, standby systems;
+    starts the mock dns script in the revertive-preference host.
+    It assumes system-tests are running containers where launchpad runs in production mode"""
+
+    # Starts the mock dns script in revertive-preference host which is the active system.
+    ssh_mock_dns_cmd = 'ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no {revertive_pref_host} -- "python3 /usr/rift/usr/rift/systemtest/util/test_mock_dns.py --active-site site-a {active_host} --standby-site site-b {standby_host}"'.format(
+        revertive_pref_host=active_confd_host, active_host=active_confd_host, standby_host=standby_confd_host)
+    logger.debug('Running mock dns script in host {host}; cmd: {ssh_cmd}'.format(host=active_confd_host,
+                                                                                 ssh_cmd=ssh_mock_dns_cmd))
+    subprocess.Popen(ssh_mock_dns_cmd, shell=True)
+    # Have to check if the script ran fine
+
+    # Prepares the rw-redundancy-config.xml file
+    redundancy_cfg_file_path = os.path.join(os.getenv('RIFT_INSTALL'),
+                                            'usr/rift/systemtest/config/rw-redundancy-config.xml')
+    with open(redundancy_cfg_file_path) as f:
+        file_content = f.read()
+
+    with open(redundancy_cfg_file_path+'.auto', 'w') as f:
+        new_content = file_content.replace('1.1.1.1', active_confd_host).replace('2.2.2.2', standby_confd_host)
+        logger.debug('redundancy config file content: {}'.format(new_content))
+        f.write(new_content)
+
+    # Copies the redundancy config file to active, standby systems
+    for host_addr in (active_confd_host, standby_confd_host):
+        scp_cmd = 'scp -o StrictHostkeyChecking=no {file_path} {host}:/usr/rift/var/rift/rw-redundancy-config.xml'.format(
+            file_path=redundancy_cfg_file_path+'.auto', host=host_addr)
+        logger.debug(
+            'Copying redundancy config xml to host {host}; scp cmd: {scp_cmd}'.format(host=host_addr, scp_cmd=scp_cmd))
+        assert os.system(scp_cmd) == 0
+
+    # Restart the launchpad service in active, standby systems
+    for host_addr in (active_confd_host, standby_confd_host):
+        ssh_launchpad_restart_cmd = 'ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no {host} -- "sudo pkill rwmain"'.format(
+            host=host_addr)
+        logger.debug('Restarting launchpad service in host {host}. cmd: {ssh_cmd}'.format(host=host_addr,
+                                                                                          ssh_cmd=ssh_launchpad_restart_cmd))
+        assert os.system(ssh_launchpad_restart_cmd.format(host=host_addr)) == 0
+        time.sleep(30)
+
+    active_mgmt_session.connect()
+    rift.vcs.vcs.wait_until_system_started(active_mgmt_session)
+    mano.verify_ha_redundancy_state(active_mgmt_session)
+
+@pytest.fixture(scope='session')
+def ha_lp_nodes(sut_host_addrs, session_type):
+    """Fixture that returns rift.auto.mano.LpNode objects for active, standby LPs"""
+    lp_nodes = {}
+    for name,addr in sut_host_addrs.items():
+        lp_node = mano.LpNode(host=addr, session_type=session_type, connect=False)
+        if 'standby' in name:
+            lp_nodes['standby'] = lp_node
+        elif 'active' in name:
+            lp_nodes['active'] = lp_node
+
+    return lp_nodes
+
+@pytest.fixture(scope='session')
+def active_lp_node_obj(ha_lp_nodes):
+    """Fixture that returns rift.auto.mano.LpNode object for active LP"""
+    return ha_lp_nodes['active']
+
+@pytest.fixture(scope='session')
+def standby_lp_node_obj(ha_lp_nodes):
+    """Fixture that returns rift.auto.mano.LpNode object for standby LP"""
+    return ha_lp_nodes['standby']
+
+@pytest.fixture(scope='session')
+def rw_active_user_proxy(active_mgmt_session):
+    return active_mgmt_session.proxy(RwUserYang)
+
+@pytest.fixture(scope='session')
+def rw_active_project_proxy(active_mgmt_session):
+    return active_mgmt_session.proxy(RwProjectYang)
+
+@pytest.fixture(scope='session')
+def rw_active_rbac_int_proxy(active_mgmt_session):
+    return active_mgmt_session.proxy(RwRbacInternalYang)
+
+@pytest.fixture(scope='session')
+def rw_active_conman_proxy(active_mgmt_session):
+    return active_mgmt_session.proxy(RwConmanYang)
+
+@pytest.fixture(scope='session')
+def rbac_active_platform_proxy(active_mgmt_session):
+    return active_mgmt_session.proxy(RwRbacPlatformYang)
+
+@pytest.fixture(scope='session')
+def rw_active_cloud_pxy(active_mgmt_session):
+    return active_mgmt_session.proxy(RwCloudYang)
diff --git a/rwlaunchpad/ra/pytest/ns/ha/test_ha_basic.py b/rwlaunchpad/ra/pytest/ns/ha/test_ha_basic.py
new file mode 100644
index 0000000..102c61b
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/ha/test_ha_basic.py
@@ -0,0 +1,261 @@
+#!/usr/bin/env python3
+"""
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+"""
+
+import gi
+import pytest
+import random
+import time
+
+import rift.auto.mano as mano
+import rift.auto.descriptor
+from gi.repository.RwKeyspec import quoted_key
+
+from gi.repository import (
+    RwProjectNsdYang,
+    RwNsrYang,
+    RwVnfrYang,
+    RwVlrYang,
+    RwCloudYang,
+    RwConmanYang,
+)
+
+@pytest.fixture(scope='module')
+def test_projects():
+    projects = ['default']
+    for idx in range(1, 4):
+        projects.append('project_ha_'+str(idx))
+    return projects
+
+
+@pytest.mark.setup('active_configuration')
+@pytest.mark.incremental
+class TestActiveLpConfiguration(object):
+    """sets up the configuration as per RIFT-17854"""
+    def test_create_project_users(self, rbac_user_passwd, user_domain, rw_active_user_proxy, logger,
+            rw_active_project_proxy, rw_active_rbac_int_proxy, rw_active_conman_proxy, test_projects, user_roles):
+        # Create test users
+        user_name_pfx = 'user_ha_'
+        users = []
+        for idx in range(1, 9):
+            users.append(user_name_pfx+str(idx))
+            mano.create_user(rw_active_user_proxy, user_name_pfx+str(idx), rbac_user_passwd, user_domain)
+
+        # Create projects and assign roles to users in the newly created project
+        for project_name in test_projects:
+            if project_name == 'default':
+                continue
+            logger.debug('Creating project {}'.format(project_name))
+            mano.create_project(rw_active_conman_proxy, project_name)
+
+        for project_name in test_projects:
+            for _ in range(2):
+                role = random.choice(user_roles)
+                user = users.pop()
+                logger.debug('Assinging role {} to user {} in project {}'.format(role, user, project_name))
+                mano.assign_project_role_to_user(rw_active_project_proxy, role, user, project_name, user_domain,
+                                                rw_active_rbac_int_proxy)
+
+    def test_create_cloud_accounts(self, cloud_account, fmt_prefixed_cloud_xpath, fmt_cloud_xpath, rw_active_cloud_pxy, 
+                                test_projects, logger):
+        for project_name in test_projects:
+            logger.debug('Creating cloud account {} for project {}'.format(cloud_account.name, project_name))
+            xpath = fmt_prefixed_cloud_xpath.format(project=quoted_key(project_name),
+                                                    account_name=quoted_key(cloud_account.name))
+            rw_active_cloud_pxy.replace_config(xpath, cloud_account)
+            xpath_no_pfx = fmt_cloud_xpath.format(project=quoted_key(project_name),
+                                                  account_name=quoted_key(cloud_account.name))
+            response =  rw_active_cloud_pxy.get(xpath_no_pfx)
+            assert response.name == cloud_account.name
+            assert response.account_type == cloud_account.account_type
+
+            rw_active_cloud_pxy.wait_for(fmt_cloud_xpath.format(project=quoted_key(project_name), account_name=quoted_key(
+            cloud_account.name)) + '/connection-status/status', 'success', timeout=30, fail_on=['failure'])
+
+    def test_onboard_descriptors(self, descriptors, test_projects, active_mgmt_session, fmt_nsd_catalog_xpath, logger):
+        # Uploads the descriptors
+        for project_name in test_projects:
+            for descriptor in descriptors:
+                logger.debug('Onboarding descriptor {} for project {}'.format(descriptor, project_name))
+                rift.auto.descriptor.onboard(active_mgmt_session, descriptor, project=project_name)
+
+        # Verify whether the descriptors uploaded successfully
+        nsd_pxy = active_mgmt_session.proxy(RwProjectNsdYang)
+        for project_name in test_projects:
+            nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(project_name))
+            nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+            assert nsd_catalog
+    
+    @pytest.mark.skipif(not pytest.config.getoption("--nsr-test"), reason="need --nsr-test option to run")
+    def test_instantiate_nsr(self, fmt_nsd_catalog_xpath, cloud_account, active_mgmt_session, logger, test_projects):
+        nsd_pxy = active_mgmt_session.proxy(RwProjectNsdYang)
+        rwnsr_pxy = active_mgmt_session.proxy(RwNsrYang)
+
+        for project_name in test_projects:
+            nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(project_name))
+            nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+            assert nsd_catalog
+            nsd = nsd_catalog.nsd[0]
+            nsr = rift.auto.descriptor.create_nsr(cloud_account.name, nsd.name, nsd)
+
+            logger.debug('Instantiating NS for project {}'.format(project_name))
+            rift.auto.descriptor.instantiate_nsr(nsr, rwnsr_pxy, logger, project=project_name)
+
+
+@pytest.mark.depends('active_configuration')
+@pytest.mark.setup('first-failover')
+@pytest.mark.incremental
+class TestHaFirstFailover(object):
+    def test_collect_active_lp_data(self, active_lp_node_obj, active_confd_host, standby_confd_host, logger):
+        mano.verify_hagr_endpoints(active_confd_host, standby_confd_host)
+        active_lp_node_obj.collect_data()
+
+    def test_attempt_indirect_failover(self, revertive_pref_host, active_confd_host, standby_confd_host, 
+                                        active_site_name, standby_site_name, logger):
+        # Wait for redundancy poll interval though collecting data on active LP takes more than 5 secs
+        time.sleep(5)
+        logger.debug('Attempting first failover. Host {} will be new active'.format(standby_confd_host))
+        mano.indirect_failover(revertive_pref_host, new_active_ip=standby_confd_host, new_active_site=standby_site_name, 
+            new_standby_ip=active_confd_host, new_standby_site=active_site_name)
+
+    def test_wait_for_standby_to_comeup(self, standby_mgmt_session, active_confd_host, standby_confd_host):
+        """Wait for the standby to come up; Wait for endpoint 'ha/geographic/active' to return 200"""
+        mano.wait_for_standby_to_become_active(standby_mgmt_session)
+        # mano.verify_hagr_endpoints(active_host=standby_confd_host, standby_host=active_confd_host)
+
+    def test_collect_standby_lp_data(self, standby_lp_node_obj, standby_mgmt_session, cloud_account,
+                                         fmt_cloud_xpath, test_projects, fmt_nsd_catalog_xpath):
+        time.sleep(180)
+        rw_new_active_cloud_pxy = standby_mgmt_session.proxy(RwCloudYang)
+        nsd_pxy = standby_mgmt_session.proxy(RwProjectNsdYang)
+        rwnsr_proxy = standby_mgmt_session.proxy(RwNsrYang)
+
+        for project_name in test_projects:
+            rw_new_active_cloud_pxy.wait_for(
+                fmt_cloud_xpath.format(project=quoted_key(project_name), account_name=quoted_key(
+                    cloud_account.name)) + '/connection-status/status', 'success', timeout=60, fail_on=['failure'])
+
+            # nsd_catalog = nsd_pxy.get_config(fmt_nsd_catalog_xpath.format(project=quoted_key(project_name)))
+            # assert nsd_catalog
+
+            if pytest.config.getoption("--nsr-test"):
+                nsr_opdata = rwnsr_proxy.get(
+                    '/rw-project:project[rw-project:name={project}]/ns-instance-opdata'.format(
+                        project=quoted_key(project_name)))
+                assert nsr_opdata
+                nsrs = nsr_opdata.nsr
+
+                for nsr in nsrs:
+                    xpath = "/rw-project:project[rw-project:name={project}]/ns-instance-opdata/nsr[ns-instance-config-ref={config_ref}]/config-status".format(
+                        project=quoted_key(project_name), config_ref=quoted_key(nsr.ns_instance_config_ref))
+                    rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=400)
+
+        standby_lp_node_obj.collect_data()
+
+    def test_match_active_standby(self, active_lp_node_obj, standby_lp_node_obj):
+        active_lp_node_obj.compare(standby_lp_node_obj)
+
+
+@pytest.mark.depends('first-failover')
+@pytest.mark.setup('active-teardown')
+@pytest.mark.incremental
+class TestHaTeardown(object):
+    """It terminates the NS & deletes descriptors, cloud accounts, projects"""
+    @pytest.mark.skipif(not pytest.config.getoption("--nsr-test"), reason="need --nsr-test option to run")
+    def test_terminate_nsr(self, test_projects, standby_mgmt_session, logger):
+        rwnsr_pxy = standby_mgmt_session.proxy(RwNsrYang)
+        rwvnfr_pxy = standby_mgmt_session.proxy(RwVnfrYang)
+        rwvlr_pxy = standby_mgmt_session.proxy(RwVlrYang)
+
+        for project_name in test_projects:
+            logger.debug("Trying to terminate NSR in project {}".format(project_name))
+            rift.auto.descriptor.terminate_nsr(rwvnfr_pxy, rwnsr_pxy, rwvlr_pxy, logger, project_name)
+
+    def test_delete_descriptors(self, standby_mgmt_session, test_projects, logger):
+        for project_name in test_projects:
+            logger.info("Trying to delete the descriptors in project {}".format(project_name))
+            rift.auto.descriptor.delete_descriptors(standby_mgmt_session, project_name)
+
+    def test_delete_cloud_accounts(self, standby_mgmt_session, logger, test_projects, cloud_account):
+        for project_name in test_projects:
+            logger.info("Trying to delete the cloud-account in project {}".format(project_name))
+            rift.auto.mano.delete_cloud_account(standby_mgmt_session, cloud_account.name, project_name)
+
+    def test_delete_projects(self, standby_mgmt_session, test_projects, logger):
+        rw_conman_proxy = standby_mgmt_session.proxy(RwConmanYang)
+        for project_name in test_projects:
+            if project_name == 'default':
+                continue
+            logger.debug('Deleting project {}'.format(project_name))
+            rift.auto.mano.delete_project(rw_conman_proxy, project_name)
+
+
+@pytest.mark.depends('active-teardown')
+@pytest.mark.incremental
+class TestHaFailoverToOriginalActive(object):
+    """Does a failover to original active and verifies the config"""
+    def test_collect_current_active_lp_data(self, standby_lp_node_obj, logger):
+        time.sleep(30)
+        logger.debug('Collecting data for host {}'.format(standby_lp_node_obj.host))
+        standby_lp_node_obj.collect_data()
+
+    def test_attempt_indirect_failover(self, revertive_pref_host, active_confd_host, standby_confd_host, 
+                                        active_site_name, standby_site_name, logger):
+        # Wait for redundancy poll interval.
+        time.sleep(5)
+        logger.debug('Attempting second failover. Host {} will be new active'.format(active_confd_host))
+        mano.indirect_failover(revertive_pref_host, new_active_ip=active_confd_host, new_active_site=active_site_name, 
+            new_standby_ip=standby_confd_host, new_standby_site=standby_site_name)
+
+    def test_wait_for_standby_to_comeup(self, active_mgmt_session, active_confd_host, standby_confd_host):
+        """Wait for the standby to come up; Wait for endpoint 'ha/geographic/active' to return 200"""
+        mano.wait_for_standby_to_become_active(active_mgmt_session)
+        # mano.verify_hagr_endpoints(active_host=standby_confd_host, standby_host=active_confd_host)
+
+    def test_collect_original_active_lp_data(self, active_lp_node_obj, logger):
+        active_lp_node_obj.session = None
+        logger.debug('Collecting data for host {}'.format(active_lp_node_obj.host))
+        active_lp_node_obj.collect_data()
+
+    def test_match_active_standby(self, active_lp_node_obj, standby_lp_node_obj):
+        standby_lp_node_obj.compare(active_lp_node_obj)
+
+    def test_delete_default_project(self, rw_active_conman_proxy):
+        rift.auto.mano.delete_project(rw_active_conman_proxy, 'default')
+
+    def test_users_presence_in_active(self, rw_active_user_proxy, user_keyed_xpath, user_domain):
+        """Users were not deleted as part of Teardown; Check those users should be present and delete them"""
+        user_config = rw_active_user_proxy.get_config('/user-config')
+        current_users_list = [user.user_name for user in user_config.user]
+
+        user_name_pfx = 'user_ha_'
+        original_test_users_list = [user_name_pfx+str(idx) for idx in range(1,9)]
+
+        assert set(original_test_users_list).issubset(current_users_list)
+
+        # Delete the users
+        for idx in range(1,9):
+            rw_active_user_proxy.delete_config(
+                user_keyed_xpath.format(user=quoted_key(user_name_pfx + str(idx)), domain=quoted_key(user_domain)))
+
+    def test_projects_deleted(self, test_projects, project_keyed_xpath, rw_active_conman_proxy):
+        """There should only be the default project; all other test projects are already deleted as part of Teardown"""
+        for project_name in test_projects:
+            project_ = rw_active_conman_proxy.get_config(
+                project_keyed_xpath.format(project_name=quoted_key(project_name)) + '/name')
+            assert project_ is None
\ No newline at end of file
diff --git a/rwlaunchpad/ra/pytest/ns/ha/test_ha_multiple_failovers.py b/rwlaunchpad/ra/pytest/ns/ha/test_ha_multiple_failovers.py
new file mode 100644
index 0000000..6b09485
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/ha/test_ha_multiple_failovers.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python3
+"""
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+"""
+
+import gi
+import pytest
+import random
+import time
+
+import rift.auto.mano as mano
+import rift.auto.descriptor
+from gi.repository.RwKeyspec import quoted_key
+
+from gi.repository import (
+    RwProjectNsdYang,
+    RwNsrYang,
+    RwVnfrYang,
+    RwVlrYang,
+    RwCloudYang,
+    RwConmanYang,
+)
+
+@pytest.fixture(scope='module')
+def test_project():
+    return 'project_ha'
+
+@pytest.mark.setup('active-configuration')
+@pytest.mark.incremental
+class TestMutipleFailoverActiveSetup(object):
+    def test_create_project_users(self, rbac_user_passwd, user_domain, rw_active_user_proxy, logger,
+            rw_active_project_proxy, rw_active_rbac_int_proxy, rw_active_conman_proxy, test_project, user_roles):
+        # Create test users
+        user_name_pfx = 'user_ha_'
+        users = []
+        for idx in range(1, 9):
+            users.append(user_name_pfx+str(idx))
+            mano.create_user(rw_active_user_proxy, user_name_pfx+str(idx), rbac_user_passwd, user_domain)
+
+        # Create a test project and assign roles to users in the newly created project
+        logger.debug('Creating project {}'.format(test_project))
+        mano.create_project(rw_active_conman_proxy, test_project)
+
+        for _ in range(8):
+            role = random.choice(user_roles)
+            user = users.pop()
+            logger.debug('Assinging role {} to user {} in project {}'.format(role, user, test_project))
+            mano.assign_project_role_to_user(rw_active_project_proxy, role, user, test_project, user_domain,
+                                            rw_active_rbac_int_proxy)
+
+    def test_create_cloud_account(self, cloud_account, fmt_prefixed_cloud_xpath, fmt_cloud_xpath, rw_active_cloud_pxy, 
+                                test_project, logger):
+        logger.debug('Creating cloud account {} for project {}'.format(cloud_account.name, test_project))
+        xpath = fmt_prefixed_cloud_xpath.format(project=quoted_key(test_project),
+                                                account_name=quoted_key(cloud_account.name))
+        rw_active_cloud_pxy.replace_config(xpath, cloud_account)
+        xpath_no_pfx = fmt_cloud_xpath.format(project=quoted_key(test_project),
+                                              account_name=quoted_key(cloud_account.name))
+        response =  rw_active_cloud_pxy.get(xpath_no_pfx)
+        assert response.name == cloud_account.name
+        assert response.account_type == cloud_account.account_type
+
+        rw_active_cloud_pxy.wait_for(fmt_cloud_xpath.format(project=quoted_key(test_project), account_name=quoted_key(
+        cloud_account.name)) + '/connection-status/status', 'success', timeout=30, fail_on=['failure'])
+
+    def test_onboard_descriptors(self, descriptors, test_project, active_mgmt_session, fmt_nsd_catalog_xpath, logger):
+        # Uploads the descriptors
+        pingpong_descriptors = descriptors['pingpong']
+        for descriptor in pingpong_descriptors:
+            logger.debug('Onboarding descriptor {} for project {}'.format(descriptor, test_project))
+            rift.auto.descriptor.onboard(active_mgmt_session, descriptor, project=test_project)
+
+        # Verify whether the descriptors uploaded successfully
+        nsd_pxy = active_mgmt_session.proxy(RwProjectNsdYang)
+        nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(test_project))
+        nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+        assert nsd_catalog
+    
+    def test_instantiate_nsr(self, fmt_nsd_catalog_xpath, cloud_account, active_mgmt_session, logger, test_project):
+        nsd_pxy = active_mgmt_session.proxy(RwProjectNsdYang)
+        rwnsr_pxy = active_mgmt_session.proxy(RwNsrYang)
+
+        nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(test_project))
+        nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+        assert nsd_catalog
+        nsd = nsd_catalog.nsd[0]
+        nsr = rift.auto.descriptor.create_nsr(cloud_account.name, nsd.name, nsd)
+
+        logger.debug('Instantiating NS for project {}'.format(test_project))
+        rift.auto.descriptor.instantiate_nsr(nsr, rwnsr_pxy, logger, project=test_project)
+
+
+@pytest.mark.depends('active-configuration')
+@pytest.mark.setup('multiple-failovers')
+@pytest.mark.incremental
+class TestHaMultipleFailovers(object):
+    def test_ha_multiple_failovers(self, revertive_pref_host, active_confd_host, standby_confd_host, standby_lp_node_obj, active_lp_node_obj, logger, 
+                                        fmt_cloud_xpath, cloud_account, test_project, active_site_name, standby_site_name, standby_mgmt_session, active_mgmt_session, descriptors):
+        count, failover_count = 1, 10
+        current_actv_mgmt_session, current_stdby_mgmt_session = active_mgmt_session, standby_mgmt_session
+        current_actv_lp_node_obj = active_lp_node_obj
+
+        descriptor_list = descriptors['haproxy'][::-1] + descriptors['vdud_cfgfile'][::-1]
+        
+        original_active_as_standby_kwargs = {'revertive_pref_host': revertive_pref_host, 'new_active_ip': standby_confd_host, 'new_active_site': standby_site_name, 
+            'new_standby_ip': active_confd_host, 'new_standby_site': active_site_name}
+        original_active_as_active_kwargs = {'revertive_pref_host': revertive_pref_host, 'new_active_ip':active_confd_host, 'new_active_site': active_site_name, 
+            'new_standby_ip': standby_confd_host, 'new_standby_site': standby_site_name}
+
+        while count <= failover_count:
+            kwargs = original_active_as_active_kwargs
+            if count%2 == 1:
+                kwargs = original_active_as_standby_kwargs
+
+            # upload descriptor
+            if count not in [5,6,7,8]:
+                descriptor = descriptor_list.pop()
+                rift.auto.descriptor.onboard(current_actv_mgmt_session, descriptor, project=test_project)
+
+            # Collect config, op-data from current active before doing a failover
+            current_actv_lp_node_obj.session = None
+            current_actv_lp_node_obj.collect_data()
+
+            time.sleep(5)
+            logger.debug('Failover Iteration - {}. Current standby {} will be the new active'.format(count, current_stdby_mgmt_session.host))
+            mano.indirect_failover(**kwargs)
+
+            last_actv_lp_node_obj = current_actv_lp_node_obj
+            current_actv_mgmt_session, current_stdby_mgmt_session = active_mgmt_session, standby_mgmt_session
+            current_actv_lp_node_obj = active_lp_node_obj
+            if count%2 == 1:
+                current_actv_lp_node_obj = standby_lp_node_obj
+                current_actv_mgmt_session, current_stdby_mgmt_session = standby_mgmt_session, active_mgmt_session
+
+            logger.debug('Waiting for the new active {} to come up'.format(current_actv_mgmt_session.host))
+            mano.wait_for_standby_to_become_active(current_actv_mgmt_session)
+
+            # Wait for NSR to become active
+            rw_new_active_cloud_pxy = current_actv_mgmt_session.proxy(RwCloudYang)
+            rwnsr_proxy = current_actv_mgmt_session.proxy(RwNsrYang)
+
+            rw_new_active_cloud_pxy.wait_for(
+                fmt_cloud_xpath.format(project=quoted_key(test_project), account_name=quoted_key(
+                    cloud_account.name)) + '/connection-status/status', 'success', timeout=60, fail_on=['failure'])
+
+            nsr_opdata = rwnsr_proxy.get(
+                    '/rw-project:project[rw-project:name={project}]/ns-instance-opdata'.format(
+                        project=quoted_key(test_project)))
+            assert nsr_opdata
+            nsrs = nsr_opdata.nsr
+
+            for nsr in nsrs:
+                xpath = "/rw-project:project[rw-project:name={project}]/ns-instance-opdata/nsr[ns-instance-config-ref={config_ref}]/config-status".format(
+                    project=quoted_key(test_project), config_ref=quoted_key(nsr.ns_instance_config_ref))
+                rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=400)
+
+            # Collect config, op-data from new active
+            current_actv_lp_node_obj.session = None
+            current_actv_lp_node_obj.collect_data()
+
+            # Compare data between last active and current active
+            current_actv_lp_node_obj.compare(last_actv_lp_node_obj)
+            count += 1
+
+
+@pytest.mark.depends('multiple-failovers')
+@pytest.mark.incremental
+class TestHaOperationPostMultipleFailovers(object):
+    def test_instantiate_nsr(self, fmt_nsd_catalog_xpath, cloud_account, active_mgmt_session, logger, test_project):
+        """Check if a new NS instantiation goes through after multiple HA failovers.
+        It uses metadata cfgfile nsd for the instantiation.
+        There alreasy exists ping pong NS instantiation"""
+        nsd_pxy = active_mgmt_session.proxy(RwProjectNsdYang)
+        rwnsr_pxy = active_mgmt_session.proxy(RwNsrYang)
+
+        nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(test_project))
+        nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+        assert nsd_catalog
+        cfgfile_nsd = [nsd for nsd in nsd_catalog.nsd if 'cfgfile_nsd' in nsd.name][0]
+        nsr = rift.auto.descriptor.create_nsr(cloud_account.name, cfgfile_nsd.name, cfgfile_nsd)
+
+        logger.debug('Instantiating cfgfile NS for project {}'.format(test_project))
+        rift.auto.descriptor.instantiate_nsr(nsr, rwnsr_pxy, logger, project=test_project)
+
+    def test_nsr_terminate(self, active_mgmt_session, logger, test_project):
+        """"""
+        rwnsr_pxy = active_mgmt_session.proxy(RwNsrYang)
+        rwvnfr_pxy = active_mgmt_session.proxy(RwVnfrYang)
+        rwvlr_pxy = active_mgmt_session.proxy(RwVlrYang)
+
+        logger.debug("Trying to terminate ping pong, cfgfile NSRs in project {}".format(test_project))
+        rift.auto.descriptor.terminate_nsr(rwvnfr_pxy, rwnsr_pxy, rwvlr_pxy, logger, test_project)
+
+    def test_delete_descriptors(self, active_mgmt_session, test_project, logger):
+        logger.info("Trying to delete the descriptors in project {}".format(test_project))
+        rift.auto.descriptor.delete_descriptors(active_mgmt_session, test_project)
+
+    def test_delete_cloud_accounts(self, active_mgmt_session, logger, test_project, cloud_account):
+        logger.info("Trying to delete the cloud-account in project {}".format(test_project))
+        rift.auto.mano.delete_cloud_account(active_mgmt_session, cloud_account.name, test_project)
+
+    def test_delete_projects(self, active_mgmt_session, test_project, logger):
+        rw_conman_proxy = active_mgmt_session.proxy(RwConmanYang)
+        logger.debug('Deleting project {}'.format(test_project))
+        rift.auto.mano.delete_project(rw_conman_proxy, test_project)
\ No newline at end of file
diff --git a/rwlaunchpad/ra/pytest/ns/ha/test_ha_operations.py b/rwlaunchpad/ra/pytest/ns/ha/test_ha_operations.py
new file mode 100644
index 0000000..5372a1e
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/ha/test_ha_operations.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python3
+"""
+#
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+"""
+
+import gi
+import pytest
+import random
+import time
+
+import rift.auto.mano as mano
+import rift.auto.descriptor
+from gi.repository.RwKeyspec import quoted_key
+
+from gi.repository import (
+    RwProjectNsdYang,
+    RwNsrYang,
+    RwVnfrYang,
+    RwVlrYang,
+    RwProjectVnfdYang,
+    RwCloudYang
+)
+
+
+@pytest.mark.setup('active_configuration')
+@pytest.mark.incremental
+class TestActiveLpConfiguration(object):
+    """Setting up the configuration."""
+
+    def collect_active_lp_data(
+            self, active_lp_node_obj, active_confd_host,
+            standby_confd_host, logger):
+        """Collect active lp data."""
+        mano.verify_hagr_endpoints(active_confd_host, standby_confd_host)
+        active_lp_node_obj.collect_data()
+
+    def wait_for_standby_to_comeup(
+            self, standby_mgmt_session, active_confd_host, standby_confd_host):
+        """Wait for the standby to come up.
+
+        Wait for endpoint 'ha/geographic/active' to return 200
+        """
+        mano.wait_for_standby_to_become_active(standby_mgmt_session)
+        # mano.verify_hagr_endpoints(
+        #    active_host=standby_confd_host, standby_host=active_confd_host)
+
+    def collect_standby_lp_data(
+            self, standby_lp_node_obj, standby_mgmt_session, cloud_account,
+            fmt_cloud_xpath, projects, fmt_nsd_catalog_xpath):
+        """Collect standby lp data."""
+        time.sleep(180)
+        rw_new_active_cloud_pxy = standby_mgmt_session.proxy(RwCloudYang)
+        nsd_pxy = standby_mgmt_session.proxy(RwProjectNsdYang)
+        rwnsr_proxy = standby_mgmt_session.proxy(RwNsrYang)
+
+        for project_name in projects:
+            rw_new_active_cloud_pxy.wait_for(
+                fmt_cloud_xpath.format(
+                    project=quoted_key(project_name),
+                    account_name=quoted_key(cloud_account.name)) +
+                '/connection-status/status', 'success',
+                timeout=60, fail_on=['failure'])
+
+            # nsd_catalog = nsd_pxy.get_config(
+            #    fmt_nsd_catalog_xpath.format(project=quoted_key(project_name)))
+            # assert nsd_catalog
+
+            if pytest.config.getoption("--nsr-test"):
+                nsr_opdata = rwnsr_proxy.get(
+                    '/rw-project:project[rw-project:name={project}]' +
+                    '/ns-instance-opdata'.format(
+                        project=quoted_key(project_name))
+                )
+
+                assert nsr_opdata
+                nsrs = nsr_opdata.nsr
+
+                for nsr in nsrs:
+                    xpath = (
+                        '/rw-project:project[rw-project:name={project}]' +
+                        '/ns-instance-opdata/nsr[ns-instance-config-ref=' +
+                        '{config_ref}]/config-status'.format(
+                            project=quoted_key(project_name),
+                            config_ref=quoted_key(nsr.ns_instance_config_ref))
+                    )
+
+                    rwnsr_proxy.wait_for(
+                        xpath, "configured", fail_on=['failed'], timeout=400)
+
+        standby_lp_node_obj.collect_data()
+
+    def attempt_indirect_failover(
+            self, revertive_pref_host, active_confd_host, standby_confd_host,
+            active_site_name, standby_site_name, logger):
+        """Try indirect failover."""
+        time.sleep(5)
+        logger.debug(
+            'Attempting first failover. Host {} will be new active'.format(
+                standby_confd_host))
+
+        mano.indirect_failover(
+            revertive_pref_host, new_active_ip=standby_confd_host,
+            new_active_site=standby_site_name,
+            new_standby_ip=active_confd_host,
+            new_standby_site=active_site_name)
+
+    def match_active_standby(self, active_lp_node_obj, standby_lp_node_obj):
+        """Compare active standby."""
+        active_lp_node_obj.compare(standby_lp_node_obj)
+
+    def test_create_project_users_cloud_acc(
+            self, rbac_user_passwd, user_domain, rw_active_user_proxy, logger,
+            rw_active_project_proxy, rw_active_rbac_int_proxy, cloud_account,
+            rw_active_conman_proxy, rw_active_cloud_pxy, user_roles,
+            fmt_prefixed_cloud_xpath, fmt_cloud_xpath, descriptors,
+            active_mgmt_session, fmt_nsd_catalog_xpath, active_lp_node_obj,
+            standby_lp_node_obj, active_confd_host, standby_confd_host,
+            revertive_pref_host, active_site_name, standby_site_name,
+            standby_mgmt_session):
+        """Create 3 of users, projects, cloud accounts, decriptors & nsrs."""
+        def failover_and_match():
+            """Try an indirect failover.
+
+            Match active and standby data
+            """
+            self.collect_active_lp_data(
+                active_lp_node_obj, active_confd_host,
+                standby_confd_host, logger)
+            self.attempt_indirect_failover(
+                revertive_pref_host, active_confd_host, standby_confd_host,
+                active_site_name, standby_site_name, logger)
+            self.wait_for_standby_to_comeup(
+                standby_mgmt_session, active_confd_host, standby_confd_host)
+            self.collect_standby_lp_data(
+                standby_lp_node_obj, standby_mgmt_session, cloud_account,
+                fmt_cloud_xpath, projects, fmt_nsd_catalog_xpath)
+            self.match_active_standby(active_lp_node_obj, standby_lp_node_obj)
+
+        def delete_data_set(idx):
+
+            rift.auto.descriptor.terminate_nsr(
+                rwvnfr_pxy, rwnsr_pxy, rwvlr_pxy, logger,
+                project=projects[idx])
+
+            rift.auto.descriptor.delete_descriptors(
+                active_mgmt_session, project_name)
+
+            rw_active_cloud_pxy.delete_config(
+                fmt_prefixed_cloud_xpath.format(
+                    project=quoted_key(projects[idx]),
+                    account_name=quoted_key(cloud_account.name)
+                )
+            )
+            response = rw_active_cloud_pxy.get(
+                fmt_cloud_xpath.format(
+                    project=quoted_key(projects[idx]),
+                    account_name=quoted_key(cloud_account.name)
+                )
+            )
+            assert response is None
+
+            mano.delete_project(rw_active_conman_proxy, projects[idx])
+            projects.pop()
+            mano.delete_user(rw_active_user_proxy, users[idx], user_domain)
+            users.pop()
+
+        # Create test users
+        user_name_pfx = 'user_ha_'
+        users = []
+        for idx in range(1, 4):
+            users.append(user_name_pfx + str(idx))
+
+            mano.create_user(
+                rw_active_user_proxy, user_name_pfx + str(idx),
+                rbac_user_passwd, user_domain)
+
+        # Create projects and assign roles to users
+        prj_name_pfx = 'prj_ha_'
+        projects = []
+        for idx in range(1, 4):
+            project_name = prj_name_pfx + str(idx)
+            projects.append(project_name)
+            mano.create_project(
+                rw_active_conman_proxy, project_name)
+
+        for idx in range(0, 3):
+            project_name = projects[idx]
+            role = random.choice(user_roles)
+            user = users[idx]
+            logger.debug(
+                'Assinging role {} to user {} in project {}'.format(
+                    role, user, project_name))
+
+            mano.assign_project_role_to_user(
+                rw_active_project_proxy, role, user, project_name,
+                user_domain, rw_active_rbac_int_proxy)
+
+            logger.debug(
+                'Creating cloud account {} for project {}'.format(
+                    cloud_account.name, project_name))
+
+            xpath = fmt_prefixed_cloud_xpath.format(
+                project=quoted_key(project_name),
+                account_name=quoted_key(cloud_account.name))
+
+            rw_active_cloud_pxy.replace_config(xpath, cloud_account)
+
+            xpath_no_pfx = fmt_cloud_xpath.format(
+                project=quoted_key(project_name),
+                account_name=quoted_key(cloud_account.name))
+
+            response = rw_active_cloud_pxy.get(xpath_no_pfx)
+            assert response.name == cloud_account.name
+            assert response.account_type == cloud_account.account_type
+
+            rw_active_cloud_pxy.wait_for(
+                fmt_cloud_xpath.format(
+                    project=quoted_key(project_name),
+                    account_name=quoted_key(cloud_account.name)) +
+                '/connection-status/status', 'success', timeout=30,
+                fail_on=['failure'])
+
+            # Uploads the descriptors
+            for descriptor in descriptors:
+                rift.auto.descriptor.onboard(
+                    active_mgmt_session, descriptor, project=project_name)
+
+            # Verify whether the descriptors uploaded successfully
+            logger.debug(
+                'Onboarding descriptors for project {}'.format(project_name))
+
+            nsd_pxy = active_mgmt_session.proxy(RwProjectNsdYang)
+            rwnsr_pxy = active_mgmt_session.proxy(RwNsrYang)
+            rwvnfr_pxy = active_mgmt_session.proxy(RwVnfrYang)
+            rwvlr_pxy = active_mgmt_session.proxy(RwVlrYang)
+
+            nsd_xpath = fmt_nsd_catalog_xpath.format(
+                project=quoted_key(project_name))
+            nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+            assert nsd_catalog
+
+            nsd_xpath = fmt_nsd_catalog_xpath.format(
+                project=quoted_key(project_name))
+            nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+            assert nsd_catalog
+            nsd = nsd_catalog.nsd[0]
+            nsr = rift.auto.descriptor.create_nsr(
+                cloud_account.name, nsd.name, nsd)
+
+            logger.debug(
+                'Instantiating NS for project {}'.format(project_name))
+            rift.auto.descriptor.instantiate_nsr(
+                nsr, rwnsr_pxy, logger, project=project_name)
+
+        delete_data_set(2)
+        failover_and_match()
+        delete_data_set(1)
+        failover_and_match()
+
+
diff --git a/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py b/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py
index 846ef2e..ec472a9 100644
--- a/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py
+++ b/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py
@@ -15,18 +15,26 @@
 #   limitations under the License.
 #
 
+import gi
 import pytest
 
-from gi.repository import NsrYang, RwNsrYang, RwVnfrYang, NsdYang, RwNsdYang
+from gi.repository import (
+    NsrYang,
+    RwNsrYang,
+    RwVnfrYang,
+    RwProjectNsdYang,
+    )
 import rift.auto.session
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
 
 @pytest.fixture(scope='module')
 def proxy(request, mgmt_session):
     return mgmt_session.proxy
 
 
-ScalingGroupInstance = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance
-ScalingGroup = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup
+ScalingGroupInstance = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance
+ScalingGroup = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup
 
 INSTANCE_ID = 1
 
@@ -41,9 +49,9 @@
             proxy (Callable): Proxy for launchpad session.
             state (str): Expected state
         """
-        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+        nsr_opdata = proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
         nsr = nsr_opdata.nsr[0]
-        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
+        xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(quoted_key(nsr.ns_instance_config_ref))
         proxy(RwNsrYang).wait_for(xpath, state, timeout=240)
 
     def verify_scaling_group(self, proxy, group_name, expected_records_count, scale_out=True):
@@ -58,12 +66,12 @@
             2. Status of the scaling group
             3. New vnfr record has been created.
         """
-        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+        nsr_opdata = proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
         nsr_id = nsr_opdata.nsr[0].ns_instance_config_ref
 
-        xpath = ('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'
-                 '/scaling-group-record[scaling-group-name-ref="{}"]').format(
-                        nsr_id, group_name)
+        xpath = ('/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]'
+                 '/scaling-group-record[scaling-group-name-ref={}]').format(
+                        quoted_key(nsr_id), quoted_key(group_name))
 
         scaling_record = proxy(NsrYang).get(xpath)
 
@@ -74,7 +82,7 @@
 
             for vnfr in instance.vnfrs:
                 vnfr_record = proxy(RwVnfrYang).get(
-                        "/vnfr-catalog/vnfr[id='{}']".format(vnfr))
+                        "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr[id={}]".format(quoted_key(vnfr)))
                 assert vnfr_record is not None
 
     def verify_scale_up(self, proxy, group_name, expected):
@@ -105,38 +113,38 @@
         """Wait till the NSR state moves to configured before starting scaling
         tests.
         """
-        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+        nsr_opdata = proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
         nsrs = nsr_opdata.nsr
 
         assert len(nsrs) == 1
         current_nsr = nsrs[0]
 
-        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(current_nsr.ns_instance_config_ref)
+        xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(quoted_key(current_nsr.ns_instance_config_ref))
         proxy(RwNsrYang).wait_for(xpath, "configured", timeout=240)
 
 
     def test_min_max_scaling(self, proxy):
-        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+        nsr_opdata = proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
         nsrs = nsr_opdata.nsr
         nsd_id = nsrs[0].nsd_ref
         nsr_id = nsrs[0].ns_instance_config_ref
 
         # group_name = "http_client_group"
 
-        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/scaling-group-record".format(nsr_id)
+        xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/scaling-group-record".format(quoted_key(nsr_id))
         scaling_records = proxy(RwNsrYang).get(xpath, list_obj=True)
 
         for scaling_record in scaling_records.scaling_group_record:
             group_name = scaling_record.scaling_group_name_ref
-            xpath = "/nsd-catalog/nsd[id='{}']/scaling-group-descriptor[name='{}']".format(
-                    nsd_id, group_name)
-            scaling_group_desc = proxy(NsdYang).get(xpath)
+            xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]/scaling-group-descriptor[name={}]".format(
+                    quoted_key(nsd_id), quoted_key(group_name))
+            scaling_group_desc = proxy(RwProjectNsdYang).get(xpath)
 
             # Add + 1 to go beyond the threshold
             for instance_id in range(1, scaling_group_desc.max_instance_count + 1):
-                xpath = '/ns-instance-config/nsr[id="{}"]/scaling-group[scaling-group-name-ref="{}"]'.format(
-                            nsr_id, 
-                            group_name)
+                xpath = '/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr[id={}]/scaling-group[scaling-group-name-ref={}]'.format(
+                            quoted_key(nsr_id), 
+                            quoted_key(group_name))
 
                 instance = ScalingGroupInstance.from_dict({"id": instance_id})
                 scaling_group = proxy(NsrYang).get(xpath)
@@ -155,10 +163,10 @@
                     assert instance_id == scaling_group_desc.max_instance_count
 
             for instance_id in range(1, scaling_group_desc.max_instance_count):
-                xpath = ('/ns-instance-config/nsr[id="{}"]/scaling-group'
-                         '[scaling-group-name-ref="{}"]/'
-                         'instance[id="{}"]').format(
-                         nsr_id, group_name, instance_id)
+                xpath = ('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr[id={}]/scaling-group'
+                         '[scaling-group-name-ref={}]/'
+                         'instance[id={}]').format(
+                         quoted_key(nsr_id), quoted_key(group_name), quoted_key(instance_id))
                 proxy(NsrYang).delete_config(xpath)
                 self.verify_scale_in(proxy, group_name, instance_id)
 
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_accounts_framework.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_accounts_framework.py
new file mode 100644
index 0000000..b69192b
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_accounts_framework.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python3
+"""
+#
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_accounts_framework.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 06/21/2017
+@brief Test logical account usage with vim and ro
+"""
+
+import gi
+import logging
+import os
+import pytest
+import random
+import re
+import subprocess
+import sys
+import time
+import uuid
+
+from contextlib import contextmanager
+
+import rift.auto.mano
+import rift.auto.session
+import rift.auto.descriptor
+
+import rift.mano.examples.ping_pong_nsd
+
+gi.require_version('RwVnfrYang', '1.0')
+from gi.repository import (
+    NsrYang,
+    RwProjectNsdYang,
+    VnfrYang,
+    RwNsrYang,
+    RwVnfrYang,
+    RwBaseYang,
+)
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.fixture(scope='session')
+def descriptors_pingpong():
+    return rift.mano.examples.ping_pong_nsd.generate_ping_pong_descriptors(pingcount=1)
+
+@pytest.fixture(scope='session')
+def packages_pingpong(descriptors_pingpong):
+    return rift.auto.descriptor.generate_descriptor_packages(descriptors_pingpong)
+
+def VerifyAllInstancesRunning(mgmt_session):
+    ''' Verifies all network service instances reach running operational status '''
+    nsr_opdata = mgmt_session.proxy(RwNsrYang).get("/rw-project:project[rw-project:name='default']/ns-instance-opdata")
+    nsrs = nsr_opdata.nsr
+    for nsr in nsrs:
+        xpath = (
+            "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref='{ns_instance_config_ref}']/operational-status"
+        ).format(
+            ns_instance_config_ref=nsr.ns_instance_config_ref
+        )
+        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=300)
+
+def VerifyAllInstancesConfigured(mgmt_session):
+    ''' Verifies all network service instances reach configured config status '''
+    nsr_opdata = mgmt_session.proxy(RwNsrYang).get("/rw-project:project[rw-project:name='default']/ns-instance-opdata")
+    nsrs = nsr_opdata.nsr
+    for nsr in nsrs:
+        xpath = (
+            "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status"
+        ).format(
+            nsr.ns_instance_config_ref
+        )
+        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "configured", fail_on=['failed'], timeout=300)
+
+@pytest.mark.depends('launchpad')
+@pytest.mark.setup('descriptors')
+@pytest.mark.incremental
+class TestSetupPingpong(object):
+    def test_onboard(self, mgmt_session, packages_pingpong):
+        for descriptor_package in packages_pingpong:
+            rift.auto.descriptor.onboard(mgmt_session, descriptor_package)
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+class TestInstantiateVim:
+    def test_instantiate_vim(self, mgmt_session, cloud_account_name):
+        nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+        nsd = nsd_catalog.nsd[0]
+
+        nsr = rift.auto.descriptor.create_nsr(
+            cloud_account_name,
+            "pp_vim",
+            nsd,
+        )
+        mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+    def test_verify_running(self, mgmt_session):
+        VerifyAllInstancesRunning(mgmt_session)
+
+    def test_verify_configured(self, mgmt_session):
+        VerifyAllInstancesConfigured(mgmt_session)
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+class TestInstantiateRo:
+    def test_instantiate_ro(self, mgmt_session, cloud_account_name, ro_map):
+        nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+        nsd = nsd_catalog.nsd[0]
+
+        resource_orchestrator, datacenter = ro_map[cloud_account_name]
+        nsr = rift.auto.descriptor.create_nsr(
+            datacenter,
+            "pp_ro",
+            nsd,
+            resource_orchestrator=resource_orchestrator
+        )
+        mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+    def test_verify_running(self, mgmt_session):
+        VerifyAllInstancesRunning(mgmt_session)
+
+    def test_verify_configured(self, mgmt_session):
+        VerifyAllInstancesConfigured(mgmt_session)
+
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_floating_ip.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_floating_ip.py
new file mode 100644
index 0000000..5d3a6a3
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_floating_ip.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python3
+"""
+#
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+"""
+
+import gi
+import os
+
+import rift.auto.descriptor
+import rift.auto.mano as mano
+
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+
+from gi.repository import (
+    RwProjectNsdYang,
+    RwNsrYang,
+    RwVnfrYang,
+    RwProjectVnfdYang,
+    RwCloudYang
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+class TestFloatingIP(object):
+    """TestFloatingIP."""
+
+    # After RIFTIT-909 is completed this test will be set to working
+    valid_pool_names = ['FIP_SYSTEST_POOL_LARGE', 'public']
+    invalid_pool_names = ['', 'FIP_SYSTEST_POOL_EMPTY', 'invalid']
+
+    def create_cloud_account(
+            self, cloud_host, cloud_user, cloud_tenants, vim_ssl_enabled,
+            idx, mgmt_session):
+        """create_cloud_account."""
+        for cloud_tenant in cloud_tenants:
+            floating_ip_pool_names = (
+                self.valid_pool_names + self.invalid_pool_names)
+            project_name = 'float_project_{}'.format(idx)
+            password = 'mypasswd'
+            auth_url = 'http://{host}:5000/v3/'.format(host=cloud_host)
+            if vim_ssl_enabled is True:
+                auth_url = 'https://{host}:5000/v3/'.format(host=cloud_host)
+            mgmt_network = os.getenv('MGMT_NETWORK', 'private')
+            cloud_acc_name = 'cloud_account'
+            pool_name = floating_ip_pool_names[idx - 1]
+            cloud_account = (
+                RwCloudYang.
+                YangData_RwProject_Project_Cloud_Account.from_dict({
+                    'name': cloud_acc_name,
+                    'account_type': 'openstack',
+                    'openstack': {
+                        'admin': True,
+                        'key': cloud_user,
+                        'secret': password,
+                        'auth_url': auth_url,
+                        'tenant': cloud_tenant,
+                        'mgmt_network': mgmt_network,
+                        'floating_ip_pool': pool_name,
+                    }
+                }))
+            mano.create_cloud_account(
+                mgmt_session, cloud_account, project_name=project_name)
+
+    def yield_vnfd_vnfr_pairs(self, proxy, nsr=None):
+        """
+        Yield tuples of vnfd & vnfr entries.
+
+        Args:
+            proxy (callable): Launchpad proxy
+            nsr (optional): If specified, only the vnfr & vnfd records of the
+                NSR are returned
+
+        Yields:
+            Tuple: VNFD and its corresponding VNFR entry
+        """
+        def get_vnfd(vnfd_id):
+            xpath = (
+                "/rw-project:project[rw-project:name='default']/" +
+                "vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_id)))
+            return proxy(RwProjectVnfdYang).get(xpath)
+
+        vnfr = (
+            "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr")
+        vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)
+        for vnfr in vnfrs.vnfr:
+
+            if nsr:
+                const_vnfr_ids = [const_vnfr.vnfr_id for const_vnfr in nsr.constituent_vnfr_ref]
+                if vnfr.id not in const_vnfr_ids:
+                    continue
+
+            vnfd = get_vnfd(vnfr.vnfd.id)
+            yield vnfd, vnfr
+
+    def test_floating_ip(
+            self, rw_user_proxy, rbac_user_passwd, user_domain, logger,
+            rw_project_proxy, rw_rbac_int_proxy, descriptors, mgmt_session,
+            cloud_user, cloud_tenants, vim_ssl_enabled, cloud_host,
+            fmt_nsd_catalog_xpath):
+        """test_floating_ip."""
+        proxy = mgmt_session.proxy
+        no_of_pool_name_cases = (
+            len(self.valid_pool_names + self.invalid_pool_names) + 1)
+        for idx in range(1, no_of_pool_name_cases):
+            project_name = 'float_project_{}'.format(idx)
+            user_name = 'float_user_{}'.format(idx)
+            project_role = 'rw-project:project-admin'
+            cloud_acc_name = 'cloud_account'
+            mano.create_user(
+                rw_user_proxy, user_name, rbac_user_passwd, user_domain)
+            mano.assign_project_role_to_user(
+                rw_project_proxy, project_role, user_name, project_name,
+                user_domain, rw_rbac_int_proxy)
+
+            self.create_cloud_account(
+                cloud_host, cloud_user, cloud_tenants,
+                vim_ssl_enabled, idx, mgmt_session)
+
+            for descriptor in descriptors:
+                rift.auto.descriptor.onboard(
+                    mgmt_session, descriptor, project=project_name)
+
+            nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+            nsd_catalog = nsd_pxy.get_config(
+                fmt_nsd_catalog_xpath.format(project=quoted_key(project_name)))
+            assert nsd_catalog
+            nsd = nsd_catalog.nsd[0]
+            nsr = rift.auto.descriptor.create_nsr(
+                cloud_acc_name, nsd.name, nsd)
+            rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+
+            try:
+                rift.auto.descriptor.instantiate_nsr(
+                    nsr, rwnsr_pxy, logger, project=project_name)
+            except(Exception):
+                continue
+            for vnfd, vnfr in self.yield_vnfd_vnfr_pairs(proxy):
+                if idx > len(self.valid_pool_names):
+                    assert vnfr.vdur[0].management_ip is None
+                else:
+                    vnfr.vdur[0].management_ip is not None
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_ha_pingpong.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_ha_pingpong.py
new file mode 100644
index 0000000..02ed3a5
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_ha_pingpong.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python3
+"""
+#
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_launchpad.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 07/07/2016
+@brief High-availibility system test that runs ping pong workflow
+"""
+
+import gi
+import logging
+import os
+import pytest
+import random
+import re
+import subprocess
+import sys
+import time
+import uuid
+
+from contextlib import contextmanager
+
+import rift.auto.mano
+import rift.auto.session
+import rift.auto.descriptor
+
+gi.require_version('RwVnfrYang', '1.0')
+from gi.repository import (
+    NsrYang,
+    RwProjectNsdYang,
+    VnfrYang,
+    RwNsrYang,
+    RwVnfrYang,
+    RwBaseYang,
+)
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.mark.setup('seed_random')
+class TestSeedRandom:
+    def test_seed_random(self, random_seed):
+        logger.info("Seeding number generator with seed {}".format(random_seed))
+        random.seed(random_seed)
+
+class MaxRetriesExceededException(Exception):
+    '''Indicates the maximum allowed number of retries has been exceeded for an operation
+    '''
+    pass
+
+class HAVerifyException(Exception):
+    '''Indicates a failure to verify correct HA behaviour
+    '''
+    pass
+
+
+class HASession:
+    ''' Wrapper around management session, which kills off system components
+    in order to trigger HA functionality
+    '''
+
+    DEFAULT_ATTEMPTS=3
+    DEFAULT_MIN_DELAY=0.0
+    DEFAULT_MAX_DELAY=1
+    DEFAULT_FREQUENCY=1
+    DEFAULT_RECOVERY_TIMEOUT=120
+
+    def __init__(self, session):
+        ''' Create a new HASession instance
+
+        Returns:
+            instance of HASession
+        '''
+        self.session = session
+        self.set_config()
+
+    @contextmanager
+    def config(self, *args, **kwargs):
+        ''' Context manager to allow HASession to temporarily have its config modified
+        '''
+        current_config = self.get_config()
+        self.set_config(*args, **kwargs)
+        yield
+        self.set_config(*current_config)
+
+    def get_config(self):
+        ''' Returns the current HA session config
+        '''
+        return (self.attempts, self.min_delay, self.max_delay, self.ha_frequency, self.recovery_timeout)
+
+    def set_config(self, attempts=None, min_delay=None, max_delay=None, ha_frequency=None, recovery_timeout=None):
+        ''' Set the HA session config, set default values for all config options not provided
+
+        Arguments:
+            attempts - Number of times to attempt an operation before failing
+            min_delay - minimum time that must elapse before session is allowed to kill a component
+            max_delay - maximum time that may elapse before killing a component
+            ha_frequency - frequency at which operations are tested for ha
+            recovery_timeout - time allowed for system to recovery after a component is killed
+        '''
+        if not attempts:
+            attempts = HASession.DEFAULT_ATTEMPTS
+        if not min_delay:
+            min_delay = HASession.DEFAULT_MIN_DELAY
+        if not max_delay:
+            max_delay = HASession.DEFAULT_MAX_DELAY
+        if not ha_frequency:
+            ha_frequency = HASession.DEFAULT_FREQUENCY
+        if not recovery_timeout:
+            recovery_timeout = HASession.DEFAULT_RECOVERY_TIMEOUT
+
+        self.attempts = attempts
+        self.min_delay = min_delay
+        self.max_delay = max_delay
+        self.ha_frequency = ha_frequency
+        self.recovery_timeout = recovery_timeout
+
+    def call(self, operation, *args, **kwargs):
+        ''' Call an operation using the wrapped management session, then
+        kill off a system component, and verify the operation still succeeds
+
+        Arguments:
+            operation - operation to be invoked
+        '''
+        # Choose to make the normal session call or do the HA test
+        if random.choice(range(0,int(1/self.ha_frequency))) != 0:
+            return operation(*args, **kwargs)
+
+        # Make sure we're starting from a running system
+        rift.vcs.vcs.wait_until_system_started(self.session)
+
+        def choose_any_tasklet(vcs_info):
+            tasklets = [component_info.component_name for component_info in vcs_info.components.component_info]
+            return random.choice(tasklets)
+
+        def choose_restartable_tasklet(vcs_info):
+            restartable_tasklets = [
+                component_info.component_name
+                for component_info in vcs_info.components.component_info
+                    if component_info.recovery_action == 'RESTART'
+                    and component_info.component_type == 'RWTASKLET'
+            ]
+            return random.choice(restartable_tasklets)
+
+        vcs_info = self.session.proxy(RwBaseYang).get('/vcs/info')
+        component_name = choose_restartable_tasklet(vcs_info)
+
+        ssh_cmd = 'ssh {} -o StrictHostKeyChecking=no -o BatchMode=yes'.format(self.session.host)
+        def get_component_process_pid(component_name):
+            cmd = '{} -- \'ps -ef | grep -v "grep" | grep rwmain | grep "{}" | tr -s " " | cut -d " " -f 2\''.format(ssh_cmd, component_name)
+            logger.info("Finding component [{}] pid using cmd: {}".format(component_name, cmd))
+            output = subprocess.check_output(cmd, shell=True)
+            return output.decode('ascii').strip()
+        process_pid = get_component_process_pid(component_name)
+        logger.info('{} has pid {}'.format(component_name, process_pid))
+
+        # Kick off a background process to kill the tasklet after some delay
+        delay = self.min_delay + (self.max_delay-self.min_delay)*random.random()
+        logger.info("Killing {} [{}] in {}".format(component_name, process_pid, delay))
+        cmd = '(sleep {} && {} -- "sudo kill -9 {}") &'.format(delay, ssh_cmd, process_pid)
+        os.system(cmd)
+
+        # Invoke session operation
+        now = time.time()
+        result = None
+        attempt = 0
+        while attempt < self.attempts:
+            try:
+                result = operation(*args, **kwargs)
+                # Possible improvement:  implement optional verify step here
+                break
+            except Exception:
+                logger.error('operation failed - {}'.format(operation))
+                attempt += 1
+            # If the operation failed, wait until recovery occurs to re-attempt
+            rift.vcs.vcs.wait_until_system_started(self.session)
+
+        if attempt >= self.attempts:
+            raise MaxRetriesExceededException("Killed %s [%d] - Subsequently failed operation : %s %s %s", component_name, process_pid, operation, args, kwargs )
+
+        # Wait until kill has definitely happened
+        elapsed = now - time.time()
+        remaining = delay - elapsed
+        if remaining > 0:
+            time.sleep(remaining)
+        time.sleep(3)
+
+        # Verify system reaches running status again
+        rift.vcs.vcs.wait_until_system_started(self.session)
+
+        # TODO: verify the tasklet process was actually restarted (got a new pid)
+        new_pid = get_component_process_pid(component_name)
+        if process_pid == new_pid:
+            raise HAVerifyException("Process pid unchanged : %d == %d ~ didn't die?" % (process_pid, new_pid))
+
+        return result
+
+@pytest.fixture
+def ha_session(mgmt_session):
+   return HASession(mgmt_session)
+
+@pytest.mark.depends('seed_random')
+@pytest.mark.setup('launchpad')
+@pytest.mark.incremental
+class TestLaunchpadSetup:
+    def test_create_cloud_accounts(self, ha_session, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+        '''Configure cloud accounts
+
+        Asserts:
+            Cloud name and cloud type details
+        '''
+        for cloud_account in cloud_accounts:
+            xpath = "{cloud_xpath}[name={cloud_account_name}]".format(
+                cloud_xpath=cloud_xpath,
+                cloud_account_name=quoted_key(cloud_account.name)
+            )
+            ha_session.call(mgmt_session.proxy(cloud_module).replace_config, xpath, cloud_account)
+            response = ha_session.call(mgmt_session.proxy(cloud_module).get, xpath)
+            assert response.name == cloud_account.name
+            assert response.account_type == cloud_account.account_type
+
+@pytest.mark.teardown('launchpad')
+@pytest.mark.incremental
+class TestLaunchpadTeardown:
+    def test_delete_cloud_accounts(self, ha_session, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+        '''Unconfigure cloud_account'''
+        for cloud_account in cloud_accounts:
+            xpath = "{cloud_xpath}[name={cloud_account_name}]".format(
+                cloud_xpath=cloud_xpath,
+                cloud_account_name=quoted_key(cloud_account.name)
+            )
+            ha_session.call(mgmt_session.proxy(cloud_module).delete_config, xpath)
+
+@pytest.mark.setup('pingpong')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestSetupPingpong(object):
+    def test_onboard(self, ha_session, mgmt_session, descriptors):
+        for descriptor in descriptors:
+            with ha_session.config(max_delay=15):
+                ha_session.call(rift.auto.descriptor.onboard, mgmt_session, descriptor)
+
+    def test_instantiate(self, ha_session, mgmt_session, cloud_account_name):
+        catalog = ha_session.call(mgmt_session.proxy(RwProjectNsdYang).get_config, '/nsd-catalog')
+        nsd = catalog.nsd[0]
+        nsr = rift.auto.descriptor.create_nsr(cloud_account_name, "pingpong_1", nsd)
+        ha_session.call(mgmt_session.proxy(RwNsrYang).create_config, '/ns-instance-config/nsr', nsr)
+
+@pytest.mark.depends('pingpong')
+@pytest.mark.teardown('pingpong')
+@pytest.mark.incremental
+class TestTeardownPingpong(object):
+    def test_teardown(self, ha_session, mgmt_session):
+        ns_instance_config = ha_session.call(mgmt_session.proxy(RwNsrYang).get_config, '/ns-instance-config')
+        for nsr in ns_instance_config.nsr:
+            ha_session.call(mgmt_session.proxy(RwNsrYang).delete_config, "/ns-instance-config/nsr[id={}]".format(quoted_key(nsr.id)))
+
+        time.sleep(60)
+        vnfr_catalog = ha_session.call(mgmt_session.proxy(RwVnfrYang).get, '/vnfr-catalog')
+        assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestLaunchpad:
+    def test_account_connection_status(self, ha_session, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+        '''Verify connection status on each cloud account
+
+        Asserts:
+            Cloud account is successfully connected
+        '''
+        for cloud_account in cloud_accounts:
+            with ha_session.config(attempts=2):
+                ha_session.call(
+                    mgmt_session.proxy(cloud_module).wait_for,
+                    '{}[name={}]/connection-status/status'.format(cloud_xpath, quoted_key(cloud_account.name)),
+                    'success',
+                    timeout=60,
+                    fail_on=['failure']
+                )
+
+@pytest.mark.depends('pingpong')
+@pytest.mark.incremental
+class TestPingpong:
+    def test_service_started(self, ha_session, mgmt_session):
+        nsr_opdata = ha_session.call(mgmt_session.proxy(RwNsrYang).get, '/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = (
+                "/ns-instance-opdata/nsr[ns-instance-config-ref={ns_instance_config_ref}]/operational-status"
+            ).format(
+                ns_instance_config_ref=quoted_key(nsr.ns_instance_config_ref)
+            )
+
+            with ha_session.config(attempts=2, max_delay=60):
+                ha_session.call(mgmt_session.proxy(RwNsrYang).wait_for, xpath, "running", fail_on=['failed'], timeout=300)
+
+    def test_service_configured(self, ha_session, mgmt_session):
+        nsr_opdata = ha_session.call(mgmt_session.proxy(RwNsrYang).get, '/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = (
+                "/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status"
+            ).format(
+                quoted_key(nsr.ns_instance_config_ref)
+            )
+
+            with ha_session.config(attempts=2, max_delay=60):
+                ha_session.call(mgmt_session.proxy(RwNsrYang).wait_for, xpath, "configured", fail_on=['failed'], timeout=300)
+
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_input_params.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_input_params.py
new file mode 100644
index 0000000..a549b41
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_input_params.py
@@ -0,0 +1,431 @@
+#!/usr/bin/env python3
+"""
+#
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_input_params.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 06/21/2017
+@brief Test of VNF Input parameters using ping pong
+"""
+
+import gi
+import logging
+import os
+import pytest
+import random
+import re
+import subprocess
+import sys
+import time
+import uuid
+
+from contextlib import contextmanager
+
+import rift.auto.mano
+import rift.auto.session
+import rift.auto.descriptor
+
+gi.require_version('RwVnfrYang', '1.0')
+from gi.repository import (
+    NsrYang,
+    RwProjectNsdYang,
+    VnfrYang,
+    RwNsrYang,
+    RwVnfrYang,
+    RwBaseYang,
+)
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.fixture(scope='session')
+def global_vendor_name():
+    return 'global_vendor'
+
+@pytest.fixture(scope='session')
+def ping_custom_vendor_name():
+    return 'ping_vendor'
+
+@pytest.fixture(scope='session')
+def pong_custom_vendor_name():
+    return 'pong_vendor'
+
+@pytest.fixture(scope='session')
+def ping_custom_init_data():
+    return 'ping_custom_init_data'
+
+@pytest.fixture(scope='session')
+def pong_custom_init_data():
+    return 'pong_custom_init_data'
+
+@pytest.fixture(scope='session')
+def ping_custom_meta_data():
+    return 'ping_custom_meta_data'
+
+@pytest.fixture(scope='session')
+def pong_custom_meta_data():
+    return 'pong_custom_meta_data'
+
+@pytest.fixture(scope='session')
+def ping_custom_script_init_data():
+    return 'ping'
+
+@pytest.fixture(scope='session')
+def pong_custom_script_init_data():
+    return 'pong'
+
+@pytest.fixture(scope='session')
+def ping_descriptor(descriptors_pingpong_vnf_input_params):
+    return descriptors_pingpong_vnf_input_params[0]
+
+@pytest.fixture(scope='session')
+def pong_descriptor(descriptors_pingpong_vnf_input_params):
+    return descriptors_pingpong_vnf_input_params[1]
+
+@pytest.fixture(scope='session')
+def ping_pong_descriptor(descriptors_pingpong_vnf_input_params):
+    return descriptors_pingpong_vnf_input_params[2]
+
+@pytest.fixture(scope='session')
+def ping_id(ping_descriptor):
+    return ping_descriptor.vnfd.id
+
+@pytest.fixture(scope='session')
+def pong_id(pong_descriptor):
+    return pong_descriptor.vnfd.id
+
+@pytest.fixture(scope='session')
+def ping_script_descriptor(descriptors_pingpong_script_input_params):
+    return descriptors_pingpong_script_input_params[0]
+
+@pytest.fixture(scope='session')
+def pong_script_descriptor(descriptors_pingpong_script_input_params):
+    return descriptors_pingpong_script_input_params[1]
+
+@pytest.fixture(scope='session')
+def ping_pong_script_descriptor(descriptors_pingpong_script_input_params):
+    return descriptors_pingpong_script_input_params[2]
+
+@pytest.fixture(scope='session')
+def ping_script_id(ping_script_descriptor):
+    return ping_script_descriptor.vnfd.id
+
+@pytest.fixture(scope='session')
+def pong_script_id(pong_script_descriptor):
+    return pong_script_descriptor.vnfd.id
+
+
+def VerifyAllInstancesRunning(mgmt_session):
+    ''' Verifies all network service instances reach running operational status '''
+    nsr_opdata = mgmt_session.proxy(RwNsrYang).get("/rw-project:project[rw-project:name='default']/ns-instance-opdata")
+    nsrs = nsr_opdata.nsr
+    for nsr in nsrs:
+        xpath = (
+            "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref='{ns_instance_config_ref}']/operational-status"
+        ).format(
+            ns_instance_config_ref=nsr.ns_instance_config_ref
+        )
+        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=300)
+
+def VerifyAllInstancesConfigured(mgmt_session):
+    ''' Verifies all network service instances reach configured config status '''
+    nsr_opdata = mgmt_session.proxy(RwNsrYang).get("/rw-project:project[rw-project:name='default']/ns-instance-opdata")
+    nsrs = nsr_opdata.nsr
+    for nsr in nsrs:
+        xpath = (
+            "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status"
+        ).format(
+            nsr.ns_instance_config_ref
+        )
+        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "configured", fail_on=['failed'], timeout=300)
+
+@pytest.mark.depends('launchpad')
+@pytest.mark.setup('descriptors')
+@pytest.mark.incremental
+class TestSetupPingpong(object):
+    def test_onboard_custom_descriptors(self, mgmt_session, packages_pingpong_vnf_input_params, packages_pingpong_script_input_params):
+        for descriptor_package in packages_pingpong_vnf_input_params:
+            rift.auto.descriptor.onboard(mgmt_session, descriptor_package)
+        for descriptor_package in packages_pingpong_script_input_params:
+            rift.auto.descriptor.onboard(mgmt_session, descriptor_package)
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+class TestGlobalVnfInputParams:
+    def test_instantiate(self, mgmt_session, cloud_account_name, global_vendor_name):
+        ''' Testing vnf input parameters with broadest xpath expression allowed
+
+        /vnfd:vnfd-catalog/vnfd:vnfd/<leaf>
+        
+        Expected to replace the leaf in all member VNFs
+        '''
+
+        xpath = "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vendor"
+        value = global_vendor_name
+        vnf_input_parameter = rift.auto.descriptor.create_vnf_input_parameter(xpath, value)
+
+        nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+        nsd = [nsd for nsd in nsd_catalog.nsd if nsd.name == 'pp_input_nsd'][0]
+
+        nsr = rift.auto.descriptor.create_nsr(
+            cloud_account_name,
+            "pp_input_params_1",
+            nsd,
+            vnf_input_param_list=[vnf_input_parameter]
+        )
+        mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+
+    def test_verify_running(self, mgmt_session):
+        VerifyAllInstancesRunning(mgmt_session)
+
+    def test_verify_configured(self, mgmt_session):
+        VerifyAllInstancesConfigured(mgmt_session)
+
+    def test_verify_vnf_input_parameters(self, mgmt_session, ping_id, pong_id, global_vendor_name):
+        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog")
+        ping_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == ping_id][0]
+        pong_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == pong_id][0]
+        ping_vendor_name = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vendor" % ping_vnfr.id)
+        assert ping_vendor_name == global_vendor_name
+        pong_vendor_name = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vendor" % pong_vnfr.id)
+        assert pong_vendor_name == global_vendor_name
+
+    def test_teardown(self, mgmt_session):
+        ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config("/rw-project:project[rw-project:name='default']/ns-instance-config")
+        for nsr in ns_instance_config.nsr:
+            mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id='{}']".format(nsr.id))
+        time.sleep(60)
+        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/rw-project:project[rw-project:name='default']/vnfr-catalog")
+        assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+class TestMemberVnfInputParams:
+    def test_instantiate(self, mgmt_session, cloud_account_name, ping_id, pong_id, ping_custom_vendor_name, pong_custom_vendor_name):
+        ''' Testing vnf input parameters with member specific xpath expression
+
+        /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>'/<leaf>
+        
+        Expected to replace the leaf in a specific member VNF
+        '''
+
+        xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vendor" % (ping_id)
+        value = ping_custom_vendor_name
+        vnf_input_parameter_ping = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=ping_id)
+
+        xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vendor" % (pong_id)
+        value = pong_custom_vendor_name
+        vnf_input_parameter_pong = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=pong_id)
+
+        nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+        nsd = [nsd for nsd in nsd_catalog.nsd if nsd.name == 'pp_input_nsd'][0]
+
+        nsr = rift.auto.descriptor.create_nsr(
+            cloud_account_name,
+            "pp_input_params_2",
+            nsd,
+            vnf_input_param_list=[vnf_input_parameter_ping, vnf_input_parameter_pong]
+        )
+        mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+    def test_verify_running(self, mgmt_session):
+        VerifyAllInstancesRunning(mgmt_session)
+
+    def test_verify_configured(self, mgmt_session):
+        VerifyAllInstancesConfigured(mgmt_session)
+
+    def test_verify_vnf_input_parameters(self, mgmt_session, ping_id, pong_id, ping_custom_vendor_name, pong_custom_vendor_name):
+        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog")
+        ping_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == ping_id][0]
+        pong_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == pong_id][0]
+        ping_vendor_name = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vendor" % ping_vnfr.id)
+        assert ping_vendor_name == ping_custom_vendor_name
+        pong_vendor_name = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vendor" % pong_vnfr.id)
+        assert pong_vendor_name == pong_custom_vendor_name
+
+    def test_teardown(self, mgmt_session):
+        ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config("/rw-project:project[rw-project:name='default']/ns-instance-config")
+        for nsr in ns_instance_config.nsr:
+            mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id='{}']".format(nsr.id))
+        time.sleep(60)
+        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/rw-project:project[rw-project:name='default']/vnfr-catalog")
+        assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+class TestMemberVnfInputParamsCloudInit:
+    def test_instantiate(self, mgmt_session, cloud_account_name, ping_id, pong_id, ping_custom_init_data, pong_custom_init_data):
+        ''' Testing vnf input parameters with node specific xpath expression
+
+        /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>']/vnfd:vdu[vnfd:id="<vdu-id>"]/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name=<leaf-name>]/vnfd:value 
+        /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>'/<leaf>
+        
+        Expected to replace the leaf in a specific member VNF
+        '''
+
+        xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='iovdu_0']/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='custom_cloud_init_data']/vnfd:value" % (ping_id)
+        value = ping_custom_init_data
+        vnf_input_parameter_ping = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=ping_id)
+
+        xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='iovdu_0']/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='custom_cloud_init_data']/vnfd:value" % (pong_id)
+        value = pong_custom_init_data
+        vnf_input_parameter_pong = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=pong_id)
+
+
+        nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+        nsd = [nsd for nsd in nsd_catalog.nsd if nsd.name == 'pp_input_nsd'][0]
+
+        nsr = rift.auto.descriptor.create_nsr(
+            cloud_account_name,
+            "pp_input_params_3",
+            nsd,
+            vnf_input_param_list=[vnf_input_parameter_ping, vnf_input_parameter_pong]
+        )
+        mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+    def test_verify_running(self, mgmt_session):
+        VerifyAllInstancesRunning(mgmt_session)
+
+    def test_verify_configured(self, mgmt_session):
+        VerifyAllInstancesConfigured(mgmt_session)
+
+    def test_verify_vnf_input_parameters(self, mgmt_session, ping_id, pong_id, ping_custom_init_data, pong_custom_init_data):
+        ''' Verify both ping and pong init data were replaced with their respective init data
+        '''
+        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog")
+        ping_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == ping_id][0]
+        pong_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == pong_id][0]
+
+        # Verify the data was replaced in the vdu
+        ping_init_data = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vnfd/vdu/supplemental-boot-data/custom-meta-data[name='custom_cloud_init_data']/value" % (ping_vnfr.id))
+        assert ping_init_data == ping_custom_init_data
+        pong_init_data = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vnfd/vdu/supplemental-boot-data/custom-meta-data[name='custom_cloud_init_data']/value" % (pong_vnfr.id))
+        assert pong_init_data == pong_custom_init_data
+
+    def test_teardown(self, mgmt_session):
+        ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config("/rw-project:project[rw-project:name='default']/ns-instance-config")
+        for nsr in ns_instance_config.nsr:
+            mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id='{}']".format(nsr.id))
+        time.sleep(60)
+        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/rw-project:project[rw-project:name='default']/vnfr-catalog")
+        assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+class TestMemberVnfInputParamsCloudMeta:
+    def test_instantiate(self, mgmt_session, cloud_account_name, ping_id, pong_id, ping_custom_meta_data, pong_custom_meta_data):
+        ''' Testing vnf input parameters with node specific xpath expression
+
+        /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>']/vnfd:vdu[vnfd:id="<vdu-id>"]/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name=<leaf-name>]/vnfd:value 
+        /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>'/<leaf>
+        
+        Expected to replace the leaf in a specific member VNF
+        '''
+
+        xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='iovdu_0']/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='custom_cloud_meta_data']/vnfd:value" % (ping_id)
+        value = ping_custom_meta_data
+        vnf_input_parameter_ping = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=ping_id)
+
+        xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='iovdu_0']/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='custom_cloud_meta_data']/vnfd:value" % (pong_id)
+        value = pong_custom_meta_data
+        vnf_input_parameter_pong = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=pong_id)
+
+
+        nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+        nsd = [nsd for nsd in nsd_catalog.nsd if nsd.name == 'pp_input_nsd'][0]
+
+        nsr = rift.auto.descriptor.create_nsr(
+            cloud_account_name,
+            "pp_input_params_4",
+            nsd,
+            vnf_input_param_list=[vnf_input_parameter_ping, vnf_input_parameter_pong]
+        )
+        mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+    def test_verify_running(self, mgmt_session):
+        VerifyAllInstancesRunning(mgmt_session)
+
+    def test_verify_configured(self, mgmt_session):
+        VerifyAllInstancesConfigured(mgmt_session)
+
+    def test_verify_vnf_input_parameters(self, mgmt_session, ping_id, pong_id, ping_custom_meta_data, pong_custom_meta_data):
+        ''' Verify both ping and pong meta data were replaced with their respective meta data
+        '''
+        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog")
+        ping_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == ping_id][0]
+        pong_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == pong_id][0]
+
+        # Verify the data was replaced in the vdu
+        ping_meta_data = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vnfd/vdu/supplemental-boot-data/custom-meta-data[name='custom_cloud_meta_data']/value" % (ping_vnfr.id))
+        assert ping_meta_data == ping_custom_meta_data
+        pong_meta_data = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vnfd/vdu/supplemental-boot-data/custom-meta-data[name='custom_cloud_meta_data']/value" % (pong_vnfr.id))
+        assert pong_meta_data == pong_custom_meta_data
+
+        # Verify the data was also replaced in the vdur
+        ping_meta_data = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vdur/supplemental-boot-data/custom-meta-data[name='custom_cloud_meta_data']/value" % (ping_vnfr.id))
+        assert ping_meta_data == ping_custom_meta_data
+        pong_meta_data = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vdur/supplemental-boot-data/custom-meta-data[name='custom_cloud_meta_data']/value" % (pong_vnfr.id))
+        assert pong_meta_data == pong_custom_meta_data
+
+    def test_teardown(self, mgmt_session):
+        ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config("/rw-project:project[rw-project:name='default']/ns-instance-config")
+        for nsr in ns_instance_config.nsr:
+            mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id='{}']".format(nsr.id))
+        time.sleep(60)
+        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/rw-project:project[rw-project:name='default']/vnfr-catalog")
+        assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+@pytest.mark.skipif(True, reason='RIFT-18171 - Disabled due to cloud init failure on userdata supplied bash scripts')
+class TestMemberVnfInputParamsInitScripts:
+    def test_instantiate(self, mgmt_session, cloud_account_name, ping_script_id, pong_script_id, ping_custom_script_init_data, pong_custom_script_init_data):
+        ''' Testing replacement of vnf input parameters with node specific xpath expression in init scripts
+
+        /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>']/vnfd:vdu[vnfd:id="<vdu-id>"]/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name=<leaf-name>]/vnfd:value 
+        /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>'/<leaf>
+
+        Expected to replace the leaf in a specific member VNF
+        '''
+
+        xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='iovdu_0']/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='CI-script-init-data']/vnfd:value" % (ping_script_id)
+        value = ping_custom_script_init_data
+        vnf_input_parameter_ping = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=ping_script_id)
+
+        xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='iovdu_0']/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='CI-script-init-data']/vnfd:value" % (pong_script_id)
+        value = pong_custom_script_init_data
+        vnf_input_parameter_pong = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=pong_script_id)
+
+        nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+        nsd = [nsd for nsd in nsd_catalog.nsd if nsd.name == 'pp_script_nsd'][0]
+
+        nsr = rift.auto.descriptor.create_nsr(
+            cloud_account_name,
+            "pp_input_params_5",
+            nsd,
+            vnf_input_param_list=[vnf_input_parameter_ping, vnf_input_parameter_pong]
+        )
+        mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+    def test_verify_running(self, mgmt_session):
+        VerifyAllInstancesRunning(mgmt_session)
+
+    def test_verify_configured(self, mgmt_session):
+        # Configuration will only succeed if the replacement was sucessful
+        VerifyAllInstancesConfigured(mgmt_session)
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_mro_pingpong.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_mro_pingpong.py
new file mode 100644
index 0000000..45407db
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_mro_pingpong.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+"""
+#
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_mro_pingpong.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 06/21/2017
+@brief Multi-RO test that instantiates two ping pong instances on seperate ROs
+"""
+
+import gi
+import logging
+import os
+import pytest
+import random
+import re
+import subprocess
+import sys
+import time
+import uuid
+
+from contextlib import contextmanager
+
+import rift.auto.mano
+import rift.auto.session
+import rift.auto.descriptor
+
+gi.require_version('RwVnfrYang', '1.0')
+from gi.repository import (
+    NsrYang,
+    RwProjectNsdYang,
+    VnfrYang,
+    RwNsrYang,
+    RwVnfrYang,
+    RwBaseYang,
+)
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.mark.setup('pingpong')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestSetupPingpong(object):
+    def test_onboard(self, mgmt_session, descriptors):
+        for descriptor in descriptors:
+            rift.auto.descriptor.onboard(mgmt_session, descriptor)
+
+    def test_instantiate(self, mgmt_session, ro_account_info):
+        catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+        nsd = catalog.nsd[0]
+        instance_id = 0
+        for resource_orchestrator, account_info in ro_account_info.items():
+            for datacenter in account_info['datacenters']:
+                nsr = rift.auto.descriptor.create_nsr(
+                        datacenter,
+                        "pingpong_{}".format(instance_id),
+                        nsd,
+                        resource_orchestrator=resource_orchestrator
+                )
+                mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+                instance_id += 1
+
+
+@pytest.mark.depends('pingpong')
+@pytest.mark.incremental
+class TestPingpong:
+    def test_service_started(self, mgmt_session):
+        nsr_opdata = mgmt_session.proxy(RwNsrYang).get("/rw-project:project[rw-project:name='default']/ns-instance-opdata")
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = (
+                "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={ns_instance_config_ref}]/operational-status"
+            ).format(
+                ns_instance_config_ref=quoted_key(nsr.ns_instance_config_ref)
+            )
+            mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=300)
+
+    def test_service_configured(self, mgmt_session):
+        nsr_opdata = mgmt_session.proxy(RwNsrYang).get("/rw-project:project[rw-project:name='default']/ns-instance-opdata")
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = (
+                "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status"
+            ).format(
+                quoted_key(nsr.ns_instance_config_ref)
+            )
+            mgmt_session.proxy(RwNsrYang).wait_for(xpath, "configured", fail_on=['failed'], timeout=300)
+
+@pytest.mark.depends('pingpong')
+@pytest.mark.teardown('pingpong')
+@pytest.mark.incremental
+class TestTeardownPingpong(object):
+    def test_teardown(self, mgmt_session):
+        ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config("/rw-project:project[rw-project:name='default']/ns-instance-config")
+        for nsr in ns_instance_config.nsr:
+            mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(nsr.id)))
+
+        time.sleep(60)
+        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/rw-project:project[rw-project:name='default']/vnfr-catalog")
+        assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py
index 45a7832..f2d6695 100644
--- a/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py
@@ -23,45 +23,50 @@
 @brief Launchpad System Test
 """
 
+import gi
 import json
 import logging
 import os
 import pytest
-import shlex
 import requests
+import shlex
 import shutil
 import subprocess
 import tempfile
 import time
 import uuid
 
+import rift.auto.descriptor
 import rift.auto.mano
 import rift.auto.session
 import rift.mano.examples.ping_pong_nsd as ping_pong
 
-import gi
 gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
 gi.require_version('RwLaunchpadYang', '1.0')
 gi.require_version('RwBaseYang', '1.0')
 
 from gi.repository import (
-    NsdYang,
+    RwProjectNsdYang,
     RwNsrYang,
     RwVnfrYang,
     NsrYang,
     VnfrYang,
     VldYang,
-    RwVnfdYang,
+    RwProjectVnfdYang,
     RwLaunchpadYang,
     RwBaseYang
 )
 
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
 logging.basicConfig(level=logging.DEBUG)
 
 @pytest.fixture(scope='module')
 def vnfd_proxy(request, mgmt_session):
-    return mgmt_session.proxy(RwVnfdYang)
+    return mgmt_session.proxy(RwProjectVnfdYang)
 
 @pytest.fixture(scope='module')
 def rwvnfr_proxy(request, mgmt_session):
@@ -73,7 +78,7 @@
 
 @pytest.fixture(scope='module')
 def nsd_proxy(request, mgmt_session):
-    return mgmt_session.proxy(NsdYang)
+    return mgmt_session.proxy(RwProjectNsdYang)
 
 @pytest.fixture(scope='module')
 def rwnsr_proxy(request, mgmt_session):
@@ -86,30 +91,6 @@
 class DescriptorOnboardError(Exception):
     pass
 
-def create_nsr(nsd, input_param_list, cloud_account_name):
-    """
-    Create the NSR record object
-
-    Arguments:
-        nsd                 - NSD
-        input_param_list    - list of input-parameter objects
-        cloud_account_name  - name of cloud account
-
-    Return:
-         NSR object
-    """
-    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
-
-    nsr.id = str(uuid.uuid4())
-    nsr.name = rift.auto.mano.resource_name(nsr.id)
-    nsr.short_name = "nsr_short_name"
-    nsr.description = "This is a description"
-    nsr.nsd.from_dict(nsr.as_dict())
-    nsr.admin_status = "ENABLED"
-    nsr.input_parameter.extend(input_param_list)
-    nsr.cloud_account = cloud_account_name
-
-    return nsr
 
 def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
     curl_cmd = 'curl --insecure -F "descriptor=@{file}" https://{host}:4567/api/upload'.format(
@@ -125,10 +106,10 @@
 
     return transaction_id
 
-def wait_onboard_transaction_finished(logger, transaction_id, timeout=30, host="127.0.0.1"):
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=30, host="127.0.0.1", project="default"):
 
     def check_status_onboard_status():
-        uri = 'https://%s:4567/api/upload/%s/state' % (host, transaction_id)
+        uri = 'https://%s:8008/api/operational/project/%s/create-jobs/job/%s' % (host, project, transaction_id)
         curl_cmd = 'curl --insecure {uri}'.format(uri=uri)
         return subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
 
@@ -188,13 +169,13 @@
     """
     logger.debug("Terminating Ping Pong NSRs")
 
-    nsr_path = "/ns-instance-config"
+    nsr_path = "/rw-project:project[rw-project:name='default']/ns-instance-config"
     nsr = rwnsr_proxy.get_config(nsr_path)
     nsrs = nsr.nsr
 
     xpaths = []
     for ping_pong in nsrs:
-        xpath = "/ns-instance-config/nsr[id='{}']".format(ping_pong.id)
+        xpath = "/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(ping_pong.id))
         rwnsr_proxy.delete_config(xpath)
         xpaths.append(xpath)
 
@@ -204,14 +185,14 @@
         assert nsr is None
 
     # Get the ns-instance-config
-    ns_instance_config = rwnsr_proxy.get_config("/ns-instance-config")
+    ns_instance_config = rwnsr_proxy.get_config("/rw-project:project[rw-project:name='default']/ns-instance-config")
 
     # Termination tests
-    vnfr = "/vnfr-catalog/vnfr"
+    vnfr = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr"
     vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
     assert vnfrs is None or len(vnfrs.vnfr) == 0
 
-    # nsr = "/ns-instance-opdata/nsr"
+    # nsr = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr"
     # nsrs = rwnsr_proxy.get(nsr, list_obj=True)
     # assert len(nsrs.nsr) == 0
 
@@ -297,7 +278,7 @@
         """Generates & On-boards the descriptors.
         """
         temp_dirs = []
-        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
         endpoint = "upload"
 
         """
@@ -319,7 +300,7 @@
                         scheme,
                         cert)
 
-            catalog = vnfd_proxy.get_config('/vnfd-catalog')
+            catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
             vnfds = catalog.vnfd
             assert len(vnfds) == 2, "There should two vnfds"
             assert "ping_vnfd" in [vnfds[0].name, vnfds[1].name]
@@ -327,13 +308,13 @@
 
 
         def delete_vnfds():
-            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
             for vnfd_record in vnfds.vnfd:
-                xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+                xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
                 vnfd_proxy.delete_config(xpath)
 
             time.sleep(5)
-            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
             assert vnfds is None or len(vnfds.vnfd) == 0
 
 
@@ -380,7 +361,7 @@
                 scheme,
                 cert)
 
-        catalog = nsd_proxy.get_config('/nsd-catalog')
+        catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
         nsds = catalog.nsd
         assert len(nsds) == 1, "There should only be a single nsd"
         assert nsds[0].name == "ping_pong_nsd"
@@ -389,7 +370,7 @@
 #         for temp_dir in temp_dirs:
 #             temp_dir.cleanup()
 
-    def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account):
+    def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account, use_accounts):
 
         def verify_input_parameters(running_config, config_param):
             """
@@ -405,49 +386,66 @@
                                                                            config_param.value,
                                                                            running_config.input_parameter))
 
-        catalog = nsd_proxy.get_config('/nsd-catalog')
+        catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
         nsd = catalog.nsd[0]
 
         input_parameters = []
-        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+        descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id=%s]/project-nsd:vendor" % quoted_key(nsd.id)
         descr_value = "automation"
         in_param_id = str(uuid.uuid4())
 
-        input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+        input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
                                                                 xpath=descr_xpath,
                                                                 value=descr_value)
 
         input_parameters.append(input_param_1)
 
-        nsr = create_nsr(nsd, input_parameters, cloud_account.name)
+        nsr_id = str(uuid.uuid4())
+        if use_accounts:
+            nsr = rift.auto.descriptor.create_nsr(
+                    cloud_account.name,
+                    nsr_id,
+                    nsd, 
+                    input_param_list=input_parameters,
+                    account=cloud_account.name,
+                    nsr_id=nsr_id
+            )
+        else:
+            nsr = rift.auto.descriptor.create_nsr(
+                    cloud_account.name,
+                    nsr_id,
+                    nsd, 
+                    input_param_list=input_parameters,
+                    nsr_id=nsr_id
+            )
 
         logger.info("Instantiating the Network Service")
-        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+        rwnsr_proxy.create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
 
-        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+        nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]'.format(quoted_key(nsr.id)))
         assert nsr_opdata is not None
 
         # Verify the input parameter configuration
-        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        running_config = rwnsr_proxy.get_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id=%s]" % quoted_key(nsr.id))
         for input_param in input_parameters:
             verify_input_parameters(running_config, input_param)
 
     def test_wait_for_pingpong_started(self, rwnsr_proxy):
-        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
         nsrs = nsr_opdata.nsr
 
         for nsr in nsrs:
-            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(
-                    nsr.ns_instance_config_ref)
+            xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(
+                    quoted_key(nsr.ns_instance_config_ref))
             rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=180)
 
     def test_wait_for_pingpong_configured(self, rwnsr_proxy):
-        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
         nsrs = nsr_opdata.nsr
 
         for nsr in nsrs:
-            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(
-                    nsr.ns_instance_config_ref)
+            xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(
+                    quoted_key(nsr.ns_instance_config_ref))
             rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
 
 
@@ -472,7 +470,7 @@
         """Generates & On-boards the descriptors.
         """
         temp_dirs = []
-        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
         endpoint = "update"
         ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records                
 
@@ -495,7 +493,7 @@
                         scheme,
                         cert)
 
-            catalog = vnfd_proxy.get_config('/vnfd-catalog')
+            catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
             vnfds = catalog.vnfd
 
             assert len(vnfds) == 2, "There should two vnfds"
@@ -503,24 +501,24 @@
             assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name]
 
         def delete_nsds():
-            nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+            nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
             for nsd_record in nsds.nsd:
-                xpath = "/nsd-catalog/nsd[id='{}']".format(nsd_record.id)
+                xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(nsd_record.id))
                 nsd_proxy.delete_config(xpath)
 
             time.sleep(5)
-            nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+            nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
             assert nsds is None or len(nsds.nsd) == 0
         delete_nsds()
 
         def delete_vnfds():
-            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
             for vnfd_record in vnfds.vnfd:
-                xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+                xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
                 vnfd_proxy.delete_config(xpath)
 
             time.sleep(5)
-            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
             assert vnfds is None or len(vnfds.vnfd) == 0
 
         delete_vnfds()
@@ -569,7 +567,7 @@
                 scheme,
                 cert)
 
-        catalog = nsd_proxy.get_config('/nsd-catalog')
+        catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
         nsds = catalog.nsd
         assert len(nsds) == 1, "There should only be a single nsd"
         assert nsds[0].name == "ping_pong_nsd"
@@ -578,7 +576,7 @@
 #         for temp_dir in temp_dirs:
 #             temp_dir.cleanup()
 
-    def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account):
+    def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account, use_accounts):
         def verify_input_parameters(running_config, config_param):
             """
             Verify the configured parameter set against the running configuration
@@ -593,49 +591,66 @@
                                                                            config_param.value,
                                                                            running_config.input_parameter))
 
-        catalog = nsd_proxy.get_config('/nsd-catalog')
+        catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
         nsd = catalog.nsd[0]
 
         input_parameters = []
-        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+        descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id=%s]/project-nsd:vendor" % quoted_key(nsd.id)
         descr_value = "automation"
         in_param_id = str(uuid.uuid4())
 
-        input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+        input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
                                                                 xpath=descr_xpath,
                                                                 value=descr_value)
 
         input_parameters.append(input_param_1)
 
-        nsr = create_nsr(nsd, input_parameters, cloud_account.name)
+        nsr_id = str(uuid.uuid4())
+        if use_accounts:
+            nsr = rift.auto.descriptor.create_nsr(
+                    cloud_account.name,
+                    nsr_id,
+                    nsd, 
+                    input_param_list=input_parameters,
+                    account=cloud_account.name,
+                    nsr_id=nsr_id
+            )
+        else:
+            nsr = rift.auto.descriptor.create_nsr(
+                    cloud_account.name,
+                    nsr_id,
+                    nsd, 
+                    input_param_list=input_parameters,
+                    nsr_id=nsr_id
+            )
 
         logger.info("Instantiating the Network Service")
-        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+        rwnsr_proxy.create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
 
-        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+        nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]'.format(quoted_key(nsr.id)))
         assert nsr_opdata is not None
 
         # Verify the input parameter configuration
-        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        running_config = rwnsr_proxy.get_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id=%s]" % quoted_key(nsr.id))
         for input_param in input_parameters:
             verify_input_parameters(running_config, input_param)
 
     def test_wait_for_pingpong_started(self, rwnsr_proxy):
-        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
         nsrs = nsr_opdata.nsr
 
         for nsr in nsrs:
-            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(
-                    nsr.ns_instance_config_ref)
+            xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(
+                   quoted_key(nsr.ns_instance_config_ref)) 
             rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=180)
 
     def test_wait_for_pingpong_configured(self, rwnsr_proxy):
-        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
         nsrs = nsr_opdata.nsr
 
         for nsr in nsrs:
-            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(
-                    nsr.ns_instance_config_ref)
+            xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(
+                   quoted_key(nsr.ns_instance_config_ref)) 
             rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
 
 
@@ -660,18 +675,18 @@
         Asserts:
             The records are deleted.
         """
-        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
         for nsd in nsds.nsd:
-            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(nsd.id))
             nsd_proxy.delete_config(xpath)
 
-        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
         assert nsds is None or len(nsds.nsd) == 0
 
-        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
         for vnfd_record in vnfds.vnfd:
-            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
             vnfd_proxy.delete_config(xpath)
 
-        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
         assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py
index ff8fa96..9f70feb 100644
--- a/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py
@@ -18,11 +18,14 @@
 # Creation Date: 2016/01/04
 #
 
+import gi
 import pytest
-import rift.vcs.vcs
 import time
 
-import gi
+import rift.vcs.vcs
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
 
 @pytest.fixture(scope='module')
 def rwnsr_proxy(mgmt_session):
@@ -32,11 +35,11 @@
     time.sleep(60)
     rift.vcs.vcs.wait_until_system_started(mgmt_session)
 
-    nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+    nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
     for nsr in nsr_opdata.nsr:
-        xpath = ("/ns-instance-opdata"
-                 "/nsr[ns-instance-config-ref='%s']"
-                 "/operational-status") % (nsr.ns_instance_config_ref)
+        xpath = ("/rw-project:project[rw-project:name='default']/ns-instance-opdata"
+                 "/nsr[ns-instance-config-ref=%s]"
+                 "/operational-status") % (quoted_key(nsr.ns_instance_config_ref))
         operational_status = rwnsr_proxy.get(xpath)
         assert operational_status == 'running'
 
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py
index 9f1cd0a..5198be9 100644
--- a/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py
@@ -1,6 +1,6 @@
 
 # 
-#   Copyright 2016 RIFT.IO Inc
+#   Copyright 2016-2017 RIFT.io Inc
 #
 #   Licensed under the Apache License, Version 2.0 (the "License");
 #   you may not use this file except in compliance with the License.
@@ -16,30 +16,37 @@
 #
 
 import collections
+import gi
+import json
+import operator
+import os
+import pytest
+import re
 import socket
 import subprocess
 import time
 
-import pytest
-
-import gi
-import re
+from scapy.all import rdpcap, UDP, TCP, IP
 gi.require_version('RwNsrYang', '1.0')
 from gi.repository import (
-        NsdYang,
+        RwProjectNsdYang,
         RwBaseYang,
         RwConmanYang,
         RwNsrYang,
-        RwNsdYang,
         RwVcsYang,
         RwVlrYang,
-        RwVnfdYang,
+        RwProjectVnfdYang,
         RwVnfrYang,
         VlrYang,
         VnfrYang,
+        NsrYang,
         )
+import rift.auto.mano
 import rift.auto.session
 import rift.mano.examples.ping_pong_nsd as ping_pong
+from rift.auto.ssh import SshSession
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
 
 
 @pytest.fixture(scope='module')
@@ -53,6 +60,28 @@
     '''
     return ping_pong_factory.generate_descriptors()
 
+@pytest.fixture(scope='session')
+def updated_ping_pong_descriptors(updated_ping_pong_records):
+    '''Fixture which returns a set of updated descriptors that can be configured through
+    the management interface.
+
+    The descriptors generated by the descriptor generation process for packages don't include project 
+    information (presumably in order to avoid tying them to particular project). Here they are converted
+    to types that include project information which can then be used to configure the system.
+    '''
+    ping, pong, ping_pong = updated_ping_pong_records
+    proj_ping_vnfd = RwProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict(ping.vnfd.as_dict())
+    proj_pong_vnfd = RwProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict(pong.vnfd.as_dict())
+    proj_ping_pong_nsd = RwProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd.from_dict(ping_pong.descriptor.as_dict()['nsd'][0])
+    return proj_ping_vnfd, proj_pong_vnfd, proj_ping_pong_nsd
+
+
+class JobStatusError(Exception):
+    """JobStatusError."""
+
+    pass
+
+
 def yield_vnfd_vnfr_pairs(proxy, nsr=None):
     """
     Yields tuples of vnfd & vnfr entries.
@@ -66,10 +95,10 @@
         Tuple: VNFD and its corresponding VNFR entry
     """
     def get_vnfd(vnfd_id):
-        xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_id)
-        return proxy(RwVnfdYang).get(xpath)
+        xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_id))
+        return proxy(RwProjectVnfdYang).get(xpath)
 
-    vnfr = "/vnfr-catalog/vnfr"
+    vnfr = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr"
     vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)
     for vnfr in vnfrs.vnfr:
 
@@ -93,9 +122,9 @@
     """
 
     for nsr_cfg, nsr in yield_nsrc_nsro_pairs(proxy):
-        nsd_path = "/nsd-catalog/nsd[id='{}']".format(
-                nsr_cfg.nsd.id)
-        nsd = proxy(RwNsdYang).get_config(nsd_path)
+        nsd_path = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(
+                quoted_key(nsr_cfg.nsd.id))
+        nsd = proxy(RwProjectNsdYang).get_config(nsd_path)
 
         yield nsd, nsr
 
@@ -108,11 +137,11 @@
     Yields:
         Tuple: NSR config and its corresponding NSR op record
     """
-    nsr = "/ns-instance-opdata/nsr"
+    nsr = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr"
     nsrs = proxy(RwNsrYang).get(nsr, list_obj=True)
     for nsr in nsrs.nsr:
-        nsr_cfg_path = "/ns-instance-config/nsr[id='{}']".format(
-                nsr.ns_instance_config_ref)
+        nsr_cfg_path = "/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(
+                quoted_key(nsr.ns_instance_config_ref))
         nsr_cfg = proxy(RwNsrYang).get_config(nsr_cfg_path)
 
         yield nsr_cfg, nsr
@@ -143,12 +172,22 @@
             boolean
         """
         try:
-            socket.inet_aton(address)
+            socket.inet_pton(socket.AF_INET, address)
+            return True
+        except socket.error:
+            try:
+                socket.inet_pton(socket.AF_INET6, address)
+                return True
+            except socket.error:
+                return False
+
+    def is_ipv6(self, address):
+        """Returns True if address is of type 'IPv6', else False."""
+        try:
+            socket.inet_pton(socket.AF_INET6, address)
+            return True
         except socket.error:
             return False
-        else:
-            return True
-
 
     @pytest.mark.feature("recovery")
     def test_tasklets_recovery(self, mgmt_session, proxy, recover_tasklet):
@@ -180,30 +219,12 @@
     def test_records_present(self, proxy):
         assert_records(proxy)
 
-    def test_nsd_ref_count(self, proxy):
-        """
-        Asserts
-        1. The ref count data of the NSR with the actual number of NSRs
-        """
-        nsd_ref_xpath = "/ns-instance-opdata/nsd-ref-count"
-        nsd_refs = proxy(RwNsrYang).get(nsd_ref_xpath, list_obj=True)
-
-        expected_ref_count = collections.defaultdict(int)
-        for nsd_ref in nsd_refs.nsd_ref_count:
-            expected_ref_count[nsd_ref.nsd_id_ref] = nsd_ref.instance_ref_count
-
-        actual_ref_count = collections.defaultdict(int)
-        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
-            actual_ref_count[nsd.id] += 1
-
-        assert expected_ref_count == actual_ref_count
-
     def test_vnfd_ref_count(self, proxy):
         """
         Asserts
         1. The ref count data of the VNFR with the actual number of VNFRs
         """
-        vnfd_ref_xpath = "/vnfr-catalog/vnfd-ref-count"
+        vnfd_ref_xpath = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfd-ref-count"
         vnfd_refs = proxy(RwVnfrYang).get(vnfd_ref_xpath, list_obj=True)
 
         expected_ref_count = collections.defaultdict(int)
@@ -243,12 +264,23 @@
         for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
             assert vnfd.mgmt_interface.port == vnfr.mgmt_interface.port
             assert len(vnfd.vdu) == len(vnfr.vdur)
-
             for vdud, vdur in zip(vnfd.vdu, vnfr.vdur):
-                assert vdud.vm_flavor == vdur.vm_flavor
+                for field in vdud.vm_flavor.fields:
+                    if field in vdur.vm_flavor.fields:
+                        assert getattr(vdud.vm_flavor, field) == getattr(vdur.vm_flavor, field)
                 assert self.is_valid_ip(vdur.management_ip) is True
-                assert vdud.external_interface[0].vnfd_connection_point_ref == \
-                    vdur.external_interface[0].vnfd_connection_point_ref
+
+                vdur_intf_dict = {}
+                for intf in vdur.interface:
+                    vdur_intf_dict[intf.name] = intf.external_connection_point_ref if 'external_connection_point_ref' in \
+                                                    intf.as_dict() else intf.internal_connection_point_ref
+                for intf in vdud.interface:
+                    assert intf.name in vdur_intf_dict
+                    if intf.internal_connection_point_ref:
+                        vdud_intf_cp_ref = intf.internal_connection_point_ref
+                    else:
+                        vdud_intf_cp_ref = intf.external_connection_point_ref
+                    assert vdur_intf_dict[intf.name] == vdud_intf_cp_ref
 
     def test_external_vl(self, proxy):
         """
@@ -267,7 +299,7 @@
             assert cp_des[0].name == cp_rec[0].name
             assert self.is_valid_ip(cp_rec[0].ip_address) is True
 
-            xpath = "/vlr-catalog/vlr[id='{}']".format(cp_rec[0].vlr_ref)
+            xpath = "/rw-project:project[rw-project:name='default']/vlr-catalog/vlr[id={}]".format(quoted_key(cp_rec[0].vlr_ref))
             vlr = proxy(RwVlrYang).get(xpath)
 
             assert len(vlr.network_id) > 0
@@ -276,7 +308,7 @@
             assert self.is_valid_ip(ip) is True
             assert vlr.operational_status == "running"
 
-
+    @pytest.mark.skipif(pytest.config.getoption("--port-sequencing"), reason="port-sequencing test uses two VLs in NSD")
     def test_nsr_record(self, proxy):
         """
         Currently we only test for the components of NSR tests. Ignoring the
@@ -288,31 +320,36 @@
         """
         for nsr_cfg, nsr in yield_nsrc_nsro_pairs(proxy):
             # 1 n/w and 2 connection points
-            assert len(nsr.vlr) == 1
+            assert len(nsr.vlr) == 2
             assert len(nsr.vlr[0].vnfr_connection_point_ref) == 2
 
             assert len(nsr.constituent_vnfr_ref) == 2
             assert nsr_cfg.admin_status == 'ENABLED'
 
-    def test_wait_for_pingpong_configured(self, proxy):
-        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+    def test_wait_for_ns_configured(self, proxy):
+        nsr_opdata = proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
         nsrs = nsr_opdata.nsr
 
         assert len(nsrs) == 1
         current_nsr = nsrs[0]
 
-        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(current_nsr.ns_instance_config_ref)
+        xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(quoted_key(current_nsr.ns_instance_config_ref))
         proxy(RwNsrYang).wait_for(xpath, "configured", timeout=400)
 
-    def test_monitoring_params(self, proxy):
+    def test_wait_for_pingpong_vnf_configured(self, proxy):
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            xpath = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr[id={}]/config-status".format(quoted_key(vnfr.id))
+            proxy(VnfrYang).wait_for(xpath, "configured", timeout=400)
+    
+    def test_vnf_monitoring_params(self, proxy):
         """
         Asserts:
         1. The value counter ticks?
         2. If the meta fields are copied over
         """
         def mon_param_record(vnfr_id, mon_param_id):
-             return '/vnfr-catalog/vnfr[id="{}"]/monitoring-param[id="{}"]'.format(
-                    vnfr_id, mon_param_id)
+             return '/rw-project:project[rw-project:name="default"]/vnfr-catalog/vnfr[id={}]/monitoring-param[id={}]'.format(
+                    quoted_key(vnfr_id), quoted_key(mon_param_id))
 
         for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
             for mon_des in (vnfd.monitoring_param):
@@ -326,7 +363,37 @@
                 # Tick check
                 #assert mon_rec.value_integer > 0
 
-    def test_cm_nsr(self, proxy):
+    def test_ns_monitoring_params(self, logger, proxy):
+        """
+        Asserts:
+            1. monitoring-param match in nsd and ns-opdata
+            2. The value counter ticks?
+        """
+        mon_param_path = '/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]/monitoring-param[id={}]'
+        def fetch_monparam_value(nsr_ref, mon_param_id):
+            """Returns the monitoring parameter value"""
+            mon_param = proxy(NsrYang).get(mon_param_path.format(quoted_key(nsr_ref), quoted_key(mon_param_id)))
+            return mon_param.value_integer
+
+        def check_monparam_value(nsr_ref, mon_param_id):
+            """Check if monitoring-param values are getting updated"""
+            recent_mon_param_value = fetch_monparam_value(nsr_ref, mon_param_id)
+
+            # Monitor the values over a period of 60 secs. Fail the test if there is no update in mon-param value.
+            s_time = time.time()
+            while (time.time() - s_time) < 60:
+                if fetch_monparam_value(nsr_ref, mon_param_id) > recent_mon_param_value:
+                    return
+                time.sleep(5)
+            assert False, 'mon-param values are not getting updated. Last value was {}'.format(recent_mon_param_value)
+
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            assert len(nsd.monitoring_param) == len(nsr.monitoring_param)
+            for mon_param in nsr.monitoring_param:
+                logger.info('Verifying monitoring-param: {}'.format(mon_param.as_dict()))
+                check_monparam_value(nsr.ns_instance_config_ref, mon_param.id)
+
+    def test_cm_nsr(self, proxy, use_accounts):
         """
         Asserts:
             1. The ID of the NSR in cm-state
@@ -335,10 +402,13 @@
             4. State of the cm-nsr
         """
         for nsd, nsr in yield_nsd_nsr_pairs(proxy):
-            con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsr.ns_instance_config_ref)
+            con_nsr_xpath = "/rw-project:project[rw-project:name='default']/cm-state/cm-nsr[id={}]".format(
+                quoted_key(nsr.ns_instance_config_ref))
             con_data = proxy(RwConmanYang).get(con_nsr_xpath)
 
-            assert con_data.name == "ping_pong_nsd"
+            if not use_accounts:
+                assert con_data.name == rift.auto.mano.resource_name(nsd.name)
+
             assert len(con_data.cm_vnfr) == 2
 
             state_path = con_nsr_xpath + "/state"
@@ -351,7 +421,7 @@
             2. Name of the vnfr
             3. State of the VNFR
             4. Checks for a reachable IP in mgmt_interface
-            5. Basic checks for connection point and cfg_location.
+            5. Basic checks for connection point
         """
         def is_reachable(ip, timeout=10):
             rc = subprocess.call(["ping", "-c1", "-w", str(timeout), ip])
@@ -360,10 +430,10 @@
             return False
 
         nsr_cfg, _ = list(yield_nsrc_nsro_pairs(proxy))[0]
-        con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsr_cfg.id)
+        con_nsr_xpath = "/rw-project:project[rw-project:name='default']/cm-state/cm-nsr[id={}]".format(quoted_key(nsr_cfg.id))
 
         for _, vnfr in yield_vnfd_vnfr_pairs(proxy):
-            con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id='{}']".format(vnfr.id)
+            con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id={}]".format(quoted_key(vnfr.id))
             con_data = proxy(RwConmanYang).get(con_vnfr_path)
 
             assert con_data is not None
@@ -374,18 +444,600 @@
             con_data = proxy(RwConmanYang).get(con_vnfr_path)
             assert is_reachable(con_data.mgmt_interface.ip_address) is True
 
-            assert len(con_data.connection_point) == 1
-            connection_point = con_data.connection_point[0]
-            assert connection_point.name == vnfr.connection_point[0].name
-            assert connection_point.ip_address == vnfr.connection_point[0].ip_address
+            if pytest.config.getoption("--port-sequencing"):
+                # there are more than one connection point in the VNFDs for port sequencing test
+                # there is no distinction between icp and cp in 'show cm-state'.
+                # both icp and cp come under connection-point in 'show cm-state'
+                vnfr_intl_extl_connection_points_dict = {}
+                for icp in vnfr.vdur[0].internal_connection_point:
+                    vnfr_intl_extl_connection_points_dict[icp.name] = icp.ip_address
+                for cp in vnfr.connection_point:
+                    vnfr_intl_extl_connection_points_dict[cp.name] = cp.ip_address
 
-            assert con_data.cfg_location is not None
+                assert len(con_data.connection_point) == len(vnfr_intl_extl_connection_points_dict)
+                for cp in con_data.connection_point:
+                    assert cp.name in vnfr_intl_extl_connection_points_dict
+                    assert cp.ip_address == vnfr_intl_extl_connection_points_dict[cp.name]
+            else:
+                assert len(con_data.connection_point) == 2
+                connection_point = con_data.connection_point[0]
+                assert connection_point.name == vnfr.connection_point[0].name
+                assert connection_point.ip_address == vnfr.connection_point[0].ip_address
+
+    @pytest.mark.skipif(
+        not (pytest.config.getoption("--static-ip") or pytest.config.getoption("--update-vnfd-instantiate")),
+        reason="need --static-ip or --update-vnfd-instantiate option to run")
+    def test_static_ip(self, proxy, logger, vim_clients, cloud_account_name):
+        """
+        Asserts:
+            1. static-ip match in vnfd and vnfr
+            2. static-ip match in cm-state
+            3. Get the IP of openstack VM. Match the static-ip
+            4. Check if the VMs are reachable from each other (Skip if type of static ip addresses is IPv6)
+        """
+        nsr_cfg, _ = list(yield_nsrc_nsro_pairs(proxy))[0]
+        con_nsr_xpath = "/rw-project:project[rw-project:name='default']/cm-state/cm-nsr[id={}]".format(quoted_key(nsr_cfg.id))
+
+        ips = {}
+        static_ip_vnfd = False
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            if vnfd.vdu[0].interface[1].static_ip_address:
+                static_ip_vnfd = True
+                assert vnfd.vdu[0].interface[1].static_ip_address == vnfr.connection_point[1].ip_address
+                if 'ping' in vnfd.name:
+                    ips['mgmt_ip'] = vnfr.vdur[0].management_ip
+                else:
+                    ips['static_ip'] = vnfd.vdu[0].interface[1].static_ip_address
+
+                con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id={}]".format(quoted_key(vnfr.id))
+                con_data = proxy(RwConmanYang).get(con_vnfr_path)
+
+                assert con_data is not None
+                assert con_data.connection_point[1].ip_address == vnfd.vdu[0].interface[1].static_ip_address
+
+                xpath = "/rw-project:project[rw-project:name='default']/vlr-catalog/vlr[id={}]".format(quoted_key(vnfr.connection_point[1].vlr_ref))
+                vlr = proxy(RwVlrYang).get(xpath)
+
+                vim_client = vim_clients[cloud_account_name]
+                vm_property = vim_client.nova_server_get(vnfr.vdur[0].vim_id)
+                logger.info('VM properties for {}: {}'.format(vnfd.name, vm_property))
+
+                addr_prop_list = vm_property['addresses'][vlr.name]
+                logger.info('addresses attribute: {}'.format(addr_prop_list))
+
+                addr_prop = [addr_prop for addr_prop in addr_prop_list if addr_prop['addr'] == vnfr.connection_point[1].ip_address]
+                assert addr_prop
+
+        assert static_ip_vnfd   # if False, then none of the VNF descriptors' connections points are carrying static-ip-address field.
+
+        # Check if the VMs are reachable from each other
+        username, password = ['fedora'] * 2
+        ssh_session = SshSession(ips['mgmt_ip'])
+        assert ssh_session
+        assert ssh_session.connect(username=username, password=password)
+        if not self.is_ipv6(ips['static_ip']):
+            assert ssh_session.run_command('ping -c 5 {}'.format(ips['static_ip']))[0] == 0
+
+    @pytest.mark.skipif(not pytest.config.getoption("--vnf-dependencies"), reason="need --vnf-dependencies option to run")
+    def test_vnf_dependencies(self, proxy):
+        """
+        Asserts:
+            1. Match various config parameter sources with config primitive parameters
+            Three types of sources are being verified for pong vnfd.
+                Attribute: A runtime value like IP address of a connection point (../../../mgmt-interface, ip-address)
+                Descriptor: a XPath to a leaf in the VNF descriptor/config (../../../mgmt-interface/port)
+                Value: A pre-defined constant ('admin' as mentioned in pong descriptor)
+            2. Match the config-parameter-map defined in NS descriptor
+        There used to be a check to verify config parameter values in cm-state (cm-state/cm-nsr/cm-vnfr/config-parameter). 
+        Recently that got removed due to confd issue. So, there is no such check currently for cm-state. 
+        """
+        nsr_cfg, _ = list(yield_nsrc_nsro_pairs(proxy))[0]
+        con_nsr_xpath = "/rw-project:project[rw-project:name='default']/cm-state/cm-nsr[id={}]".format(quoted_key(nsr_cfg.id))
+
+        pong_source_map, ping_request_map = None, None
+        
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            # Get cm-state for this vnfr
+            con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id={}]".format(quoted_key(vnfr.id))
+            con_data = proxy(RwConmanYang).get(con_vnfr_path)
+
+            # Match various config parameter sources with config primitive parameters
+            for config_primitive in vnfr.vnf_configuration.config_primitive:
+                if config_primitive.name in ("config", "start-stop"):
+                    for parameter in config_primitive.parameter:
+                        if parameter.name == 'mgmt_ip':
+                            assert parameter.default_value == vnfr.mgmt_interface.ip_address
+                        if parameter.name == 'mgmt_port':
+                            assert parameter.default_value == str(vnfd.mgmt_interface.port)
+                        if parameter.name == 'username':
+                            assert parameter.default_value == 'admin'
+
+                # Fetch the source parameter values from pong vnf and request parameter values from ping vnf
+                if config_primitive.name == "config":
+                    if vnfd.name == "pong_vnfd":
+                        pong_source_map = [parameter.default_value for parameter in config_primitive.parameter if
+                                           parameter.name in ("service_ip", "service_port")]
+                    if vnfd.name == "ping_vnfd":
+                        ping_request_map = [parameter.default_value for parameter in config_primitive.parameter if
+                                            parameter.name in ("pong_ip", "pong_port")]
+        assert pong_source_map
+        assert ping_request_map
+        # Match the config-parameter-map defined in NS descriptor
+        assert sorted(pong_source_map) == sorted(ping_request_map)
+
+    @pytest.mark.skipif(not pytest.config.getoption("--port-security"), reason="need --port-security option to run")
+    def test_port_security(self, proxy, vim_clients, cloud_account_name):
+        """
+        Asserts:
+            1. port-security-enabled match in vnfd and vnfr
+            2. Get port property from openstack. Match these attributes: 'port_security_enabled', 'security_groups'
+        """
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            assert vnfd.connection_point[1].port_security_enabled == vnfr.connection_point[1].port_security_enabled
+
+            xpath = "/rw-project:project[rw-project:name='default']/vlr-catalog/vlr[id={}]".format(quoted_key(vnfr.connection_point[1].vlr_ref))
+            vlr = proxy(RwVlrYang).get(xpath)
+
+            vim_client = vim_clients[cloud_account_name]
+            port = [port for port in vim_client.neutron_port_list() if port['network_id'] == vlr.network_id if
+                    port['name'] == vnfr.connection_point[1].name]
+            assert port
+
+            port_openstack = port[0]
+            assert vnfr.connection_point[1].port_security_enabled == port_openstack['port_security_enabled']
+
+            if vnfr.connection_point[1].port_security_enabled:
+                assert port_openstack['security_groups'] # It has to carry at least one security group if enabled
+            else:
+                assert not port_openstack['security_groups']
+
+    @pytest.mark.skipif(not pytest.config.getoption("--port-sequencing"), reason="need --port-sequencing option to run")
+    def test_explicit_port_sequencing(self, proxy, vim_clients, cloud_account_name, logger, port_sequencing_intf_positions, iteration):
+        """
+        Asserts:
+            1. Interface count match in vnfd and vnfr
+            2. Get interface ordering(mac address) from VM using 'ip a' command; From output of neutron port-list, get 
+            corresponding connection point names in the same order as mac address ordered list. 
+            3. Get interface ordering from the vnfd/vdu
+            4. Compare lists from step-2 and step-3
+        """
+        username, password = ['fedora']*2
+        
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            assert len(vnfd.vdu[0].interface) == len(vnfr.vdur[0].interface)
+
+            logger.debug('Interface details for vnfd {}: {}'.format(vnfd.name, vnfd.vdu[0].as_dict()['interface']))
+
+            if iteration==1:
+                tmp_positional_values_list = []
+                for intf in vnfr.vdur[0].interface:
+                    # if no position is specified for an interface, then vnfr/vdur/interface carries 0 as its positional value
+                    if intf.position!=0:
+                        tmp_positional_values_list.append(intf.position)
+                if 'ping' in vnfd.name:
+                    assert not tmp_positional_values_list
+                if 'pong' in vnfd.name:
+                    assert set(tmp_positional_values_list) == set(port_sequencing_intf_positions)
+
+            # Get a sorted list of interfaces from vnfd/vdu
+            icp_key_name, ecp_key_name = 'internal_connection_point_ref', 'external_connection_point_ref'
+            intf_with_position_field_dict, intf_without_position_field_list = {}, []
+            
+            for intf in vnfd.vdu[0].interface:
+                intf = intf.as_dict()
+                cp_ref_key = icp_key_name if icp_key_name in intf else ecp_key_name
+                if 'position' in intf:
+                    intf_with_position_field_dict[intf['position']] = intf[cp_ref_key]
+                else:
+                    intf_without_position_field_list.append(intf[cp_ref_key])
+            
+            intf_with_position_field_list = sorted(intf_with_position_field_dict.items(), key=operator.itemgetter(0))
+            sorted_cp_names_in_vnfd = [pos_cpname_tuple[1] for pos_cpname_tuple in intf_with_position_field_list] + \
+                                                                sorted(intf_without_position_field_list)
+            
+            # Establish a ssh session to VDU to get mac address list sorted by interfaces 
+            ssh_session = SshSession(vnfr.vdur[0].management_ip)
+            assert ssh_session
+            assert ssh_session.connect(username=username, password=password)
+            e_code, ip_output, err = ssh_session.run_command('sudo ip a')
+            assert e_code == 0
+            logger.debug('Output of "ip a": {}'.format(ip_output))
+            mac_addr_list = re.findall(r'link/ether\s+(.*)\s+brd', ip_output)
+
+            # exclude eth0 as it is always a mgmt-interface
+            interface_starting_index = len(mac_addr_list) - len(vnfd.vdu[0].interface)
+            mac_addr_list = mac_addr_list[interface_starting_index: ]
+
+            # Get neutron port list
+            neutron_port_list = vim_clients[cloud_account_name].neutron_port_list()
+
+            # Get those ports whose mac_address value matches with one of the mac addresses in mac_addr_list
+            # This new list is already sorted as the outer loop iterates over mac_addr_list
+            sorted_cp_names_in_vm = [neutron_port_dict['name'] for mac in mac_addr_list for neutron_port_dict in neutron_port_list 
+                                                    if mac==neutron_port_dict['mac_address']]
+
+            logger.debug('Sorted connection points as per "ip a" in VM: {}'.format(sorted_cp_names_in_vm))
+            logger.debug('Sorted connection points as per ordering mentioned in vnfd: {}'.format(sorted_cp_names_in_vnfd))
+            
+            assert sorted_cp_names_in_vm == sorted_cp_names_in_vnfd
+
+    @pytest.mark.skipif(
+        not (pytest.config.getoption("--vnf-dependencies") and
+             pytest.config.getoption("--service-primitive")),
+        reason="need --vnf-dependencies and --service-primitive option to run")
+    def test_primitives(
+            self, mgmt_session, cloud_module, cloud_account, descriptors,
+            fmt_nsd_catalog_xpath, logger):
+        """Testing service primitives and config primitives."""
+        # Create a cloud account
+        rift.auto.mano.create_cloud_account(
+            mgmt_session, cloud_account, "default")
+
+        rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+        nsr_pxy = mgmt_session.proxy(NsrYang)
+        rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
+
+        # Testing a custom service primitive
+        ns_opdata = rwnsr_pxy.get(
+            '/rw-project:project[rw-project:name="default"]' +
+            '/ns-instance-opdata/nsr'
+        )
+        nsr_id = ns_opdata.ns_instance_config_ref
+        sp_rpc_input = NsrYang.YangInput_Nsr_ExecNsServicePrimitive.from_dict(
+            {'name': 'primitive_test', 'nsr_id_ref': nsr_id})
+        nsr_pxy.rpc(sp_rpc_input)
+
+        # Testing a config primitive
+        vnfr_catalog = rwvnfr_pxy.get(
+            '/rw-project:project[rw-project:name="default"]' +
+            '/vnfr-catalog'
+        )
+        cp_rpc_input = NsrYang.YangInput_Nsr_ExecNsServicePrimitive.from_dict(
+            {'nsr_id_ref': nsr_id})
+        vnf_list = cp_rpc_input.create_vnf_list()
+        vnf_primitive = vnf_list.create_vnf_primitive()
+        vnf_primitive.index = 1
+        vnf_primitive.name = "start-stop"
+        vnf_list.member_vnf_index_ref = (
+            vnfr_catalog.vnfr[0].member_vnf_index_ref
+        )
+        vnf_list._set_vnfr_id_ref(vnfr_catalog.vnfr[0].id)
+        vnf_list.vnf_primitive.append(vnf_primitive)
+        cp_rpc_input.vnf_list.append(vnf_list)
+        nsr_pxy.rpc(cp_rpc_input)
+        # Checking nsd joblist to see if both tests passed
+
+        def check_job_status(status=None):
+            ns_opdata = rwnsr_pxy.get(
+                '/rw-project:project[rw-project:name="default"]' +
+                '/ns-instance-opdata/nsr'
+            )
+            counter = 0
+            counter_limit = 2
+            for idx in range(0, counter_limit):
+                if ns_opdata.config_agent_job[idx].job_status == 'failure':
+                    err_msg = (
+                        'Service primitive test failed.' +
+                        ' The config agent reported failure job status')
+                    raise JobStatusError(err_msg)
+
+                elif ns_opdata.config_agent_job[idx].job_status == 'success':
+                    counter += 1
+                    continue
+
+            if counter == counter_limit:
+                return True
+            else:
+                time.sleep(5)
+                return False
+
+        start_time = time.time()
+        while (time.time() - start_time < 60):
+            status = check_job_status()
+            if status:
+                break
+        else:
+            err_msg = (
+                'Service primitive test failed. Timed out: 60 seconds' +
+                'The config agent never reached a success status')
+            raise JobStatusError(err_msg)
+
+    @pytest.mark.skipif(
+        not (pytest.config.getoption("--metadata-vdud") or pytest.config.getoption("--metadata-vdud-cfgfile")),
+        reason="need --metadata-vdud or --metadata-vdud-cfgfile option to run")
+    def test_metadata_vdud(self, logger, proxy, vim_clients, cloud_account_name, metadata_host):
+        """
+        Asserts:
+            1. content of supplemental-boot-data match in vnfd and vnfr
+            vnfr may carry extra custom-meta-data fields (e.g pci_assignement) which are by default enabled during VM creation by openstack.
+            vnfr doesn't carry config_file details; so that will be skipped during matching.
+            2. boot-data-drive match with openstack VM's config_drive attribute
+            3. For each VDUD which have config-file fields mentioned, check if there exists a path in the VM which 
+            matches with config-file's dest field. (Only applicable for cirros_cfgfile_vnfd VNF RIFT-15524)
+            4. For each VDUD, match its custom-meta-data fields with openstack VM's properties field
+        """
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            if any(name in vnfd.name for name in ['ping', 'pong', 'fedora']):
+                username, password = ['fedora'] * 2
+            elif 'ubuntu' in vnfd.name:
+                username, password = ['ubuntu'] * 2
+            elif 'cirros' in vnfd.name:
+                username, password = 'cirros', 'cubswin:)'
+            else:
+                assert False, 'Not expected to use this VNFD {} in this systemtest. VNFD might have changed. Exiting the test.'.format(
+                    vnfd.name)
+
+            # Wait till VNF's operational-status becomes 'running'
+            # The below check is usually covered as part of test_wait_for_ns_configured
+            # But, this is mostly needed when non- ping pong packages are used e.g cirrus cfgfile package
+            xpath = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr[id={}]/operational-status".format(quoted_key(vnfr.id))
+            proxy(VnfrYang).wait_for(xpath, "running", timeout=300)
+            time.sleep(5)
+
+            # Get the VDU details from openstack
+            vim_client = vim_clients[cloud_account_name]
+            vm_property = vim_client.nova_server_get(vnfr.vdur[0].vim_id)
+            logger.info('VM property for {}: {}'.format(vnfd.name, vm_property))
+            
+            # Establish a ssh session to VDU
+            ssh_session = SshSession(vnfr.vdur[0].management_ip)
+            assert ssh_session
+            assert ssh_session.connect(username=username, password=password)
+
+            assert vnfd.vdu[0].supplemental_boot_data.boot_data_drive == vnfr.vdur[
+                0].supplemental_boot_data.boot_data_drive == bool(vm_property['config_drive'])
+            # Using bool() because vm_property['config_drive'] returns 'True' or '' whereas vnfr/vnfd returns True/False
+
+            # Assert 3: only for cirros vnf
+            if 'cirros' in vnfd.name:
+                for config_file in vnfd.vdu[0].supplemental_boot_data.config_file:
+                   assert ssh_session.run_command('test -e {}'.format(config_file.dest))[0] == 0
+
+            vdur_metadata = {metadata.name: metadata.value for metadata in
+                             vnfr.vdur[0].supplemental_boot_data.custom_meta_data}
+
+            # Get the user-data/metadata from VM
+            e_code, vm_metadata, _ = ssh_session.run_command(
+                'curl http://{}/openstack/latest/meta_data.json'.format(metadata_host))
+            assert e_code == 0
+            vm_metadata = json.loads(vm_metadata)['meta']
+            logger.debug('VM metadata for {}: {}'.format(vnfd.name, vm_metadata))
+
+            for vdud_metadata in vnfd.vdu[0].supplemental_boot_data.custom_meta_data:
+                assert vdud_metadata.value == vdur_metadata[vdud_metadata.name]
+                assert vdud_metadata.value == vm_metadata[vdud_metadata.name]
+
+    @pytest.mark.skipif(not pytest.config.getoption("--multidisk"), reason="need --multidisk option to run")
+    def test_multidisk(self, logger, proxy, vim_clients, cloud_account_name, multidisk_testdata):
+        """
+        This feature is only supported in openstack, brocade vCPE.
+        Asserts:
+            1. volumes match in vnfd and vnfr
+            2. volumes match in vnfr and openstack host
+            Check no of volumes attached to the VNF VM. It should match no of volumes defined in VDUD.
+            Match volume names. In 'openstack volume show <vol_uuid>', the device should be /dev/<volume_name_in_vdud>
+            Match the volume source.
+            Match the volume size.
+            Match the Volume IDs mentioned in VNFR with openstack volume's ID.
+        """
+        ping_test_data, pong_test_data = multidisk_testdata
+        vol_attr = ['device_type', None, 'size', 'image', 'boot_priority']
+        # device_bus doesn't appear in vnfr/vdur
+
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            logger.info('Verifying VNF {}'.format(vnfd.name))
+            vnf_testdata = ping_test_data if 'ping' in vnfd.name else pong_test_data
+            
+            # Assert 1: Match volumes in vnfd, vnfr, test data
+            assert len(vnfd.vdu[0].volumes) == len(vnfr.vdur[0].volumes)
+
+            for vnfr_vol in vnfr.vdur[0].volumes:
+                logger.info('Verifying vnfr volume: {}'.format(vnfr_vol.as_dict()))
+                vnfd_vol = [vol for vol in vnfd.vdu[0].volumes if vol.name==vnfr_vol.name][0]
+
+                vol_testdata = vnf_testdata[vnfr_vol.name]
+
+                for i, attr in enumerate(vol_attr):
+                    if attr == None:    # device_bus doesn't appear in vnfr/vdur
+                        continue
+                    if i == 3 and (vol_testdata[i]==None or getattr(vnfd_vol, 'ephemeral')):
+                        # volume source of type ephemeral doesn't appear in vnfr/vdur
+                        # If no image is defined for a volume, getattr(vnfr_vol, 'ephemeral') returns False. Strange. RIFT-15165
+                        assert not getattr(vnfd_vol, 'image')
+                        continue
+                        
+                    assert getattr(vnfd_vol, attr) == getattr(vnfr_vol, attr)
+                    if vol_testdata[i] is not None:
+                        assert getattr(vnfd_vol, attr) == vol_testdata[i]
+
+            # Assert 2: Volumes match in vnfr and openstack host
+            # Get VM properties from the VIM
+            vim_client = vim_clients[cloud_account_name]
+            vm_property = vim_client.nova_server_get(vnfr.vdur[0].vim_id)
+            logger.info('VIM- VM properties: {}'.format(vm_property))
+            
+            # Get the volumes attached to this VNF VM
+            vim_volumes = vm_property['os-extended-volumes:volumes_attached']
+            logger.info('VIM- Volumes attached to this VNF VM: {}'.format(vim_volumes))
+            
+            assert vim_volumes
+            assert len(vim_volumes) == len(vnfr.vdur[0].volumes)
+
+            vnfr_volumes_by_id = {vol.volume_id:vol for vol in vnfr.vdur[0].volumes}
+            for vim_volume in vim_volumes:
+                # Match the Volume IDs mentioned in VNFR with openstack volume's ID.
+                logger.info('Verifying volume: {}'.format(vim_volume['id']))
+                assert vim_volume['id'] in vnfr_volumes_by_id.keys()
+                vnfr_vol_ = vnfr_volumes_by_id[vim_volume['id']]
+
+                # Get volume details. Equivalent cli: openstack volume show <uuid>
+                vim_vol_attrs = vim_client.cinder_volume_get(vim_volume['id'])
+
+                # Match volume size
+                assert vnfr_vol_.size == vim_vol_attrs.size
+
+                # Match volume source
+                if vnfr_vol_.image: # To make sure this is not ephemeral type
+                    logger.info('VIM- Image details of the volume: {}'.format(vim_vol_attrs.volume_image_metadata))
+                    assert vnfr_vol_.image == vim_vol_attrs.volume_image_metadata['image_name']
+                else:
+                    assert not hasattr(vim_vol_attrs, 'volume_image_metadata')
+
+                # Match volume name e.g 'device': u'/dev/vdf'
+                logger.info('Verifying [{}] in attached volumes {}'.format(vnfr_vol_.name, vim_vol_attrs.attachments))
+                assert [attachment for attachment in vim_vol_attrs.attachments if vnfr_vol_.name in attachment['device']]
+
+    @pytest.mark.skipif(not pytest.config.getoption("--l2-port-chaining"), reason="need --l2-port-chaining option to run")
+    def test_l2_port_chaining(self, proxy):
+        """
+        It uses existing NS, VNF packages: $RIFT_INSTALL/usr/rift/mano/nsds/vnffg_demo_nsd/vnffg_l2portchain_*.
+        This test function is specific to these packages. Those VNFs use Ubuntu trusty image ubuntu_trusty_1404.qcow2.
+        Asserts:
+            1. Count of VNFFG in nsd and nsr
+            2. Count of rsp, classifier in VNFFG descriptor and VNFFG record
+            3.              Need details what other fields need to be matched in nsd and nsr
+            4. Traffic flows through internal hops as per the classifier and rsp
+            As per the classifiers in NS package, the following flows will be tested.
+            - Tcp packets with dest port 80 starting from pgw VNF should go through Firewall VNF.
+            - Udp packets with source port 80 starting from router VNF should go through nat->dpi
+            - Udp packets with dest port 80 starting from pgw VNF should go through dpi->nat
+
+        """
+        UDP_PROTOCOL, TCP_PROTOCOL = 17, 6
+
+        def pcap_analysis(pcap_file, src_add, dst_add, src_port=None, dst_port=None, protocol=6):
+            """Analyse packets in a pcap file and return True if there is a packet match w.r.t src_addr, dst_addr, protocol.
+            Args:
+                pcap_file: pcap file that is generated by traffic analysis utility such as tcpdump
+                src_add, dst_addr: Source & dest IP which need to be matched for a packet
+                protocol: Protocol that needs to be matched for a packet which already matched src_addr, dst_addr (protocol accepts integer e.g TCP 6, UDP 17)
+            
+            Returns:
+                timestamp of the packet which is matched (Needed to check packet flow order through VNFs)
+                or
+                False: if there is no packet match
+
+            It uses scapy module to analyse pcap file. pip3 install scapy-python3
+            Other options https://pypi.python.org/pypi/pypcapfile
+            """
+            assert os.path.exists(pcap_file)
+            pkt_type = TCP if protocol==6 else UDP
+
+            pcap_obj = rdpcap(pcap_file)
+            for pkt in pcap_obj:
+                if IP in pkt:
+                    if not(pkt[IP].src==src_add and pkt[IP].dst==dst_add and pkt[IP].proto==protocol):
+                        continue
+                    if pkt_type in pkt:
+                        if src_port:
+                            if not (pkt[pkt_type].sport==src_port):
+                                continue
+                        if dst_port:
+                            if not (pkt[pkt_type].dport==dst_port):
+                                continue
+                    return pkt[IP].time
+            return False
+
+        # Check the VNFFG in nsd and nsr
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            vnffgds = nsd.vnffgd
+            vnffgrs = nsr.vnffgr
+            assert len(vnffgds) == len(vnffgrs)
+
+        # Check the classifier, rsp in nsd and nsr
+        for vnffgd in vnffgds:
+            vnffgr = [vnffgr for vnffgr in vnffgrs if vnffgd.id == vnffgr.vnffgd_id_ref][0]
+            assert len(vnffgd.rsp) == len(vnffgr.rsp)
+            assert len(vnffgd.classifier) == len(vnffgr.classifier)
+
+        vnfrs = proxy(RwVnfrYang).get('/rw-project:project[rw-project:name="default"]/vnfr-catalog/vnfr', list_obj=True)
+
+        # Get the IP of VMs
+        vm_names = ('router', 'firewall', 'dpi', 'nat', 'pgw')
+        vm_ips = {vm_name: vnfr.vdur[0].vm_management_ip for vm_name in vm_names for vnfr in vnfrs.vnfr if
+                  vm_name in vnfr.name}
+        vm_cp_ips = {vm_name: vnfr.connection_point[0].ip_address for vm_name in vm_names for vnfr in vnfrs.vnfr if
+                  vm_name in vnfr.name}
+
+        # Establish Ssh sessions to the VMs
+        ssh_sessions = {}
+        for vm_name, vm_ip in vm_ips.items():
+            ssh_session = SshSession(vm_ip)
+            assert ssh_session
+            assert ssh_session.connect(username='ubuntu', password='ubuntu')
+            ssh_sessions[vm_name] = ssh_session
+
+        # Start python's SimpleHTTPServer on port 80 in the router VM
+        e_code, _, _ = ssh_sessions['router'].run_command('sudo python -m SimpleHTTPServer 80', max_wait=5)
+        assert e_code is None   # Due to blocking call, it should timeout and return 'None' as exit code
+
+
+        # Check: Tcp packets with dest port 80 starting from pgw VNF should go through Firewall VNF.
+        pcap_file = 'l2test_firewall.pcap'
+        # Start tcpdump in firewall vnf and start sending tcp packets from pgw vnf
+        e_code, _, _ = ssh_sessions['firewall'].run_command(
+            'sudo tcpdump -i eth1 -w {pcap} & sleep 10; sudo kill $!'.format(pcap=pcap_file), max_wait=4)
+        e_code, _, _ = ssh_sessions['pgw'].run_command('sudo nc {router_ip} 80 -w 0'.format(router_ip=vm_cp_ips['router']))
+
+        # Copy pcap file from firewall vnf for packet analysis
+        time.sleep(10)
+        assert ssh_sessions['firewall'].get(pcap_file, pcap_file)
+        assert pcap_analysis(pcap_file, vm_cp_ips['pgw'], vm_cp_ips['router'], dst_port=80, protocol=TCP_PROTOCOL)
+
+
+        # Check: Udp packets with source port 80 starting from router VNF should go through nat->dpi
+        pcap_nat = 'l2test_nat1.pcap'
+        pcap_dpi = 'l2test_dpi1.pcap'
+        # Start tcpdump in nat, dpi vnf and start sending udp packets from router vnf
+        e_code, _, _ = ssh_sessions['nat'].run_command(
+            'sudo tcpdump -i eth1 -w {pcap} & sleep 15; sudo kill $!'.format(pcap=pcap_nat), max_wait=4)
+        e_code, _, _ = ssh_sessions['dpi'].run_command(
+            'sudo tcpdump -i eth1 -w {pcap} & sleep 10; sudo kill $!'.format(pcap=pcap_dpi), max_wait=4)
+        e_code, _, _ = ssh_sessions['router'].run_command(
+            'echo -n "hello" |  sudo nc -4u {pgw_ip} 1000 -s {router_ip} -p 80 -w 0'.format(pgw_ip=vm_cp_ips['pgw'],
+                                                                                            router_ip=vm_cp_ips[
+                                                                                                'router']))
+
+        # Copy pcap file from nat, dpi vnf for packet analysis
+        time.sleep(10)
+        assert ssh_sessions['nat'].get(pcap_nat, pcap_nat)
+        assert ssh_sessions['dpi'].get(pcap_dpi, pcap_dpi)
+        packet_ts_nat = pcap_analysis(pcap_nat, vm_cp_ips['router'], vm_cp_ips['pgw'], src_port=80, protocol=UDP_PROTOCOL)
+        packet_ts_dpi = pcap_analysis(pcap_dpi, vm_cp_ips['router'], vm_cp_ips['pgw'], src_port=80, protocol=UDP_PROTOCOL)
+        assert packet_ts_nat
+        assert packet_ts_dpi
+        assert packet_ts_nat < packet_ts_dpi    # Packet flow must follow nat -> dpi
+
+
+        # Check: Udp packets with dest port 80 starting from pgw VNF should go through dpi->nat
+        pcap_nat = 'l2test_nat2.pcap'
+        pcap_dpi = 'l2test_dpi2.pcap'
+        # Start tcpdump in nat, dpi vnf and start sending udp packets from router vnf
+        e_code, _, _ = ssh_sessions['nat'].run_command(
+            'sudo tcpdump -i eth1 -w {pcap} & sleep 15; sudo kill $!'.format(pcap=pcap_nat), max_wait=4)
+        e_code, _, _ = ssh_sessions['dpi'].run_command(
+            'sudo tcpdump -i eth1 -w {pcap} & sleep 10; sudo kill $!'.format(pcap=pcap_dpi), max_wait=4)
+        e_code, _, _ = ssh_sessions['pgw'].run_command(
+            'echo -n "hello" | sudo nc -4u {router_ip} 80 -w 0'.format(router_ip=vm_cp_ips['router']))
+
+        # Copy pcap file from nat, dpi vnf for packet analysis
+        time.sleep(10)
+        assert ssh_sessions['nat'].get(pcap_nat, pcap_nat)
+        assert ssh_sessions['dpi'].get(pcap_dpi, pcap_dpi)
+        packet_ts_nat = pcap_analysis(pcap_nat, vm_cp_ips['pgw'], vm_cp_ips['router'], dst_port=80, protocol=UDP_PROTOCOL)
+        packet_ts_dpi = pcap_analysis(pcap_dpi, vm_cp_ips['pgw'], vm_cp_ips['router'], dst_port=80, protocol=UDP_PROTOCOL)
+        assert packet_ts_nat
+        assert packet_ts_dpi
+        # The below assert used to fail while testing. ts_dpi is ahead of ts_nat in few microseconds
+        # Need to confirm if thats expected
+        assert packet_ts_dpi < packet_ts_nat    # Packet flow must follow dpi -> nat
 
 @pytest.mark.depends('nsr')
 @pytest.mark.setup('nfvi')
 @pytest.mark.incremental
 class TestNfviMetrics(object):
 
+    @pytest.mark.skipif(True, reason='NFVI metrics are disabled - RIFT-15789')
     def test_records_present(self, proxy):
         assert_records(proxy)
 
@@ -445,43 +1097,44 @@
 
 @pytest.mark.depends('nfvi')
 @pytest.mark.incremental
+@pytest.mark.skipif(pytest.config.getoption("--port-sequencing"), reason="Skip this for port-sequencing test")
 class TestRecordsDescriptors:
-    def test_create_update_vnfd(self, proxy, updated_ping_pong_records):
+    def test_create_update_vnfd(self, proxy, updated_ping_pong_descriptors):
         """
         Verify VNFD related operations
 
         Asserts:
             If a VNFD record is created
         """
-        ping_vnfd, pong_vnfd, _ = updated_ping_pong_records
-        vnfdproxy = proxy(RwVnfdYang)
+        ping_vnfd, pong_vnfd, _ = updated_ping_pong_descriptors
+        vnfdproxy = proxy(RwProjectVnfdYang)
 
-        for vnfd_record in [ping_vnfd, pong_vnfd]:
-            xpath = "/vnfd-catalog/vnfd"
-            vnfdproxy.create_config(xpath, vnfd_record.vnfd)
+        for vnfd in [ping_vnfd, pong_vnfd]:
+            xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd"
+            vnfdproxy.create_config(xpath, vnfd)
 
-            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
-            vnfd = vnfdproxy.get(xpath)
-            assert vnfd.id == vnfd_record.id
+            xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd.id))
+            updated_vnfd = vnfdproxy.get(xpath)
+            assert updated_vnfd.id == vnfd.id
 
-            vnfdproxy.replace_config(xpath, vnfd_record.vnfd)
+            vnfdproxy.replace_config(xpath, vnfd)
 
-    def test_create_update_nsd(self, proxy, updated_ping_pong_records):
+    def test_create_update_nsd(self, proxy, updated_ping_pong_descriptors):
         """
         Verify NSD related operations
 
         Asserts:
             If NSD record was created
         """
-        _, _, ping_pong_nsd = updated_ping_pong_records
-        nsdproxy = proxy(NsdYang)
+        _, _, ping_pong_nsd = updated_ping_pong_descriptors
+        nsdproxy = proxy(RwProjectNsdYang)
 
-        xpath = "/nsd-catalog/nsd"
-        nsdproxy.create_config(xpath, ping_pong_nsd.descriptor)
+        xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd"
+        nsdproxy.create_config(xpath, ping_pong_nsd)
 
-        xpath = "/nsd-catalog/nsd[id='{}']".format(ping_pong_nsd.id)
+        xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(ping_pong_nsd.id))
         nsd = nsdproxy.get(xpath)
         assert nsd.id == ping_pong_nsd.id
 
-        nsdproxy.replace_config(xpath, ping_pong_nsd.descriptor)
+        nsdproxy.replace_config(xpath, ping_pong_nsd)
 
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py
index 0878db7..ee98905 100644
--- a/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py
@@ -22,8 +22,10 @@
 @brief Pingpong scaling system test
 """
 
+import gi
 import os
 import pytest
+import re
 import subprocess
 import sys
 import time
@@ -35,27 +37,37 @@
 
 from gi.repository import (
     NsrYang,
-    NsdYang,
+    RwProjectNsdYang,
     VnfrYang,
     RwNsrYang,
-    RwNsdYang,
     RwVnfrYang,
 )
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
 
 @pytest.mark.setup('pingpong_nsd')
 @pytest.mark.depends('launchpad')
 class TestSetupPingpongNsd(object):
     def test_onboard(self, mgmt_session, descriptors):
         for descriptor in descriptors:
-            rift.auto.descriptor.onboard(mgmt_session.host, descriptor)
+            rift.auto.descriptor.onboard(mgmt_session, descriptor)
 
     def test_install_sar(self, mgmt_session):
-        install_cmd = 'ssh {mgmt_ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sudo yum install sysstat --assumeyes'.format(
-                mgmt_ip=mgmt_session.host,
-        )
+        get_platform_cmd = 'ssh {host} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- python3 -mplatform'
+        platform_result = subprocess.check_output(get_platform_cmd.format(host=mgmt_session.host), shell=True)
+        platform_match = re.search('(Ubuntu|fedora)-(\d+)', platform_result.decode('ascii'))
+        assert platform_match is not None
+        (dist, ver) = platform_match.groups()
+        if dist == 'fedora':
+            install_cmd = 'ssh {host} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sudo yum install sysstat --assumeyes'.format(
+                    host=mgmt_session.host,
+            )
+        elif dist == 'Ubuntu':
+            install_cmd = 'ssh {host} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sudo apt-get -q -y install sysstat'.format(
+                    host=mgmt_session.host,
+            )
         subprocess.check_call(install_cmd, shell=True)
 
-
 @pytest.fixture(scope='function', params=[5,10,15,20,25])
 def service_count(request):
     '''Fixture representing the number of services to test'''
@@ -67,10 +79,10 @@
     def test_scaling(self, mgmt_session, cloud_account_name, service_count):
 
         def start_services(mgmt_session, desired_service_count, max_attempts=3): 
-            catalog = mgmt_session.proxy(NsdYang).get_config('/nsd-catalog')
+            catalog = mgmt_session.proxy(RwProjectNsdYang).get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
             nsd = catalog.nsd[0]
             
-            nsr_path = "/ns-instance-config"
+            nsr_path = "/rw-project:project[rw-project:name='default']/ns-instance-config"
             nsr = mgmt_session.proxy(RwNsrYang).get_config(nsr_path)
             service_count = len(nsr.nsr)
 
@@ -78,23 +90,29 @@
             while attempts < max_attempts and service_count < desired_service_count:
                 attempts += 1
 
+                old_opdata = mgmt_session.proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
                 for count in range(service_count, desired_service_count):
                     nsr = rift.auto.descriptor.create_nsr(
                         cloud_account_name,
                         "pingpong_%s" % str(uuid.uuid4().hex[:10]),
-                        nsd.id)
-                    mgmt_session.proxy(RwNsrYang).create_config('/ns-instance-config/nsr', nsr)
+                        nsd)
+                    mgmt_session.proxy(RwNsrYang).create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
 
-                ns_instance_opdata = mgmt_session.proxy(RwNsrYang).get('/ns-instance-opdata')
-                for nsr in ns_instance_opdata.nsr:
+                time.sleep(10)
+
+                new_opdata = mgmt_session.proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
+                new_ns_instance_config_refs = {nsr.ns_instance_config_ref for nsr in new_opdata.nsr} - {nsr.ns_instance_config_ref for nsr in old_opdata.nsr}
+                for ns_instance_config_ref in new_ns_instance_config_refs:
                     try:
-                        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
-                        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=180)
-                        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(nsr.ns_instance_config_ref)
+                        xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(quoted_key(ns_instance_config_ref))
+                        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=400)
+                        xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(quoted_key(ns_instance_config_ref))
                         mgmt_session.proxy(RwNsrYang).wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
                         service_count += 1
+                        attempts = 0 # Made some progress so reset the number of attempts remaining
                     except rift.auto.session.ProxyWaitForError:
-                        mgmt_session.proxy(RwNsrYang).delete_config("/ns-instance-config/nsr[id='{}']".format(nsr.ns_instance_config_ref))
+                        mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(ns_instance_config_ref)))
+                        time.sleep(5)
 
         def monitor_launchpad_performance(service_count, interval=30, samples=1):
             sar_cmd = "ssh {mgmt_ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sar -A {interval} {samples}".format(
@@ -122,12 +140,12 @@
 class TestTeardownPingpongNsr(object):
     def test_teardown_nsr(self, mgmt_session):
 
-        ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config('/ns-instance-config')
+        ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config('/rw-project:project[rw-project:name="default"]/ns-instance-config')
         for nsr in ns_instance_config.nsr:
-            mgmt_session.proxy(RwNsrYang).delete_config("/ns-instance-config/nsr[id='{}']".format(nsr.id))
+            mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(nsr.id)))
 
         time.sleep(60)
-        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get('/vnfr-catalog')
+        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get('/rw-project:project[rw-project:name="default"]/vnfr-catalog')
         assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
 
     def test_generate_plots(self):
diff --git a/rwlaunchpad/ra/pytest/ns/rbac/conftest.py b/rwlaunchpad/ra/pytest/ns/rbac/conftest.py
new file mode 100644
index 0000000..1b3f413
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/rbac/conftest.py
@@ -0,0 +1,115 @@
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import pytest
+import itertools
+import random
+import os
+import gi
+
+import rift.auto.session
+import rift.auto.mano
+
+gi.require_version('RwAuthExtWebSvcYang', '1.0')
+gi.require_version('RwAuthExtUserYang', '1.0')
+from gi.repository import (
+    RwAuthExtWebSvcYang,
+    RwAuthExtUserYang,
+    )
+
+@pytest.fixture(scope='session')
+def auto_certs_dir():
+    """Fixture that returns path of certs specific to automation"""
+    return os.path.join(os.getenv('RIFT_INSTALL'), 'usr/rift/systemtest/config/ssl')
+
+@pytest.fixture(scope='session')
+def set_webauth_cert_choice(tbac):
+    """Fixture that retuns a boolean value indicating whether to configure new key & cert in launchpad"""
+    if not tbac:
+        return False
+    # return random.choice([True, False])
+    return True
+
+@pytest.fixture(scope='session', autouse=True)
+def configure_key_cert(logger, set_webauth_cert_choice, auto_certs_dir, mgmt_session, confd_host, rw_user_proxy, 
+    user_domain, ):
+    """Configures new cert, key in webauth-server-config, webauth-client-config"""
+    if set_webauth_cert_choice:
+        logger.debug('Configuring new certs from this path: {}'.format(auto_certs_dir))
+        print('Configuring new certs from this path: {}'.format(auto_certs_dir))
+    else:
+        return
+
+    cert_path = os.path.join(auto_certs_dir, 'rift_auto.crt')
+    key_path = os.path.join(auto_certs_dir, 'rift_auto.key')
+
+    server_ssl_config_xpath = '/rw-auth-ext-web-svc:webauth-server-config/rw-auth-ext-web-svc:ssl-config'
+    client_config_xpath = '/rw-auth-ext-user:webauth-client-config'
+    webauth_server_proxy = mgmt_session.proxy(RwAuthExtWebSvcYang)
+    webauth_client_proxy = mgmt_session.proxy(RwAuthExtUserYang)
+
+    def configure_webauth_server():
+        logger.debug('configuring the webauth-server')
+        webauth_server_obj = RwAuthExtWebSvcYang.YangData_RwAuthExtWebSvc_WebauthServerConfig_SslConfig.from_dict(
+                                                        {'server_cert_path': cert_path, 'server_key_path': key_path})
+        webauth_server_proxy.replace_config(server_ssl_config_xpath, webauth_server_obj)
+
+    def configure_webauth_client():
+        logger.debug('configuring the webauth-client')
+        webauth_client_obj = RwAuthExtUserYang.YangData_RwAuthExtUser_WebauthClientConfig.from_dict(
+                                                                            {'ca_cert_path': cert_path})
+        webauth_client_proxy.merge_config(client_config_xpath, webauth_client_obj)
+
+    # Check if its running after launchpad reload; if so skip configuring the certs again (RIFT-17641)
+    server_ssl_config = webauth_server_proxy.get_config(server_ssl_config_xpath)
+    if server_ssl_config.server_cert_path != cert_path:
+        user, password = ['demo']*2
+        logger.debug('Adding an external user {}'.format(user))
+        rift.auto.mano.create_user(rw_user_proxy, user, password, user_domain)
+
+        # Shuffling the function calls for server and client configuration
+        list_func = [configure_webauth_server, configure_webauth_client]
+        random.shuffle(list_func)
+
+        # configuring either of the server or client
+        list_func.pop()()
+
+        # Try getting access token for an external user; it should fail
+        with pytest.raises(Exception,
+                           message='Should not be able to get access token for user {} as certs are not yet configured for both server and client'.format(
+                                   user)):
+            logger.debug('Trying to get access token for user {}'.format(user))
+            access_token = rift.auto.session.get_access_token(user, password, confd_host)
+            logger.debug('Access token for user {}: {}'.format(user, access_token))
+
+        list_func.pop()()
+
+        # Try getting access token for an external user; it should pass now
+        rift.auto.session.get_access_token(user, password, confd_host)
+
+        # RIFT-17641: Delete user 'demo'
+        rift.auto.mano.delete_user(rw_user_proxy, user, user_domain)
+
+@pytest.fixture(scope='session')
+def all_roles_combinations(all_roles):
+    """Returns a combination of all roles except single combinations i.e if there are a total of N roles, then it 
+    returns (2^N-1)-N role combinations.
+    Here, we have 11 roles, so it returns 2047-11=2036 combinations"""
+    all_roles_combinations_ = list()
+    for set_length in range(2, len(all_roles)+1):
+        for roles_combination in itertools.combinations(all_roles, set_length):
+            all_roles_combinations_.append(roles_combination)
+    return tuple(all_roles_combinations_)
diff --git a/rwlaunchpad/ra/pytest/ns/rbac/test_rbac.py b/rwlaunchpad/ra/pytest/ns/rbac/test_rbac.py
new file mode 100644
index 0000000..30c3261
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/rbac/test_rbac.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python3
+"""
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+"""
+
+import gi
+import pytest
+
+from rift.auto.session import NetconfSession, RestconfSession
+import rift.auto.mano
+
+gi.require_version('RwUserYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwRbacPlatformYang', '1.0')
+gi.require_version('RwRbacInternalYang', '1.0')
+from gi.repository import (
+    RwUserYang,
+    RwProjectYang,
+    RwRbacPlatformYang,
+    RwRbacInternalYang,
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+@pytest.fixture(scope='session')
+def rbac_test_data():
+    """Fixture which returns rbac test data: users, roles, projects being used in the test.
+    users: tuple of user names
+    projects: tuple of project names
+    map_platform_roles: mapping of a user to multiple platform roles
+    map_project_roles: mapping of a user to multiple projects (project, list of roles in that project)"""
+    users = ('admin3', 'user1', 'user2', )
+
+    projects = ('project1', 'project2', )
+
+    map_platform_roles = {
+                            'admin3': ['rw-rbac-platform:platform-admin'],
+                            }
+
+    map_project_roles = {
+                            'user1': [
+                                        ('project1', ['rw-project:project-admin']),
+                                        ('project2', ['rw-project:project-oper']),
+                                     ], 
+
+                            'user2': [
+                                        ('project1', ['rw-project:project-admin']),
+                                     ], 
+
+                            'admin3': [],
+                            }
+
+    return {'users': users, 'projects': projects, 'roles': (map_platform_roles, map_project_roles)}
+
+
+@pytest.mark.setup('rbac_setup')
+@pytest.mark.incremental
+class TestRbacSetup(object):
+    def test_create_users(self, rbac_test_data, rw_user_proxy, user_domain, rbac_user_passwd, logger):
+        """Creates all users as per rbac test-data  and verify if they are successfully created."""
+        users_test_data =  rbac_test_data['users']
+
+        # Create all users mentioned in users_test_data
+        for user in users_test_data:
+            rift.auto.mano.create_user(rw_user_proxy, user, rbac_user_passwd, user_domain)
+
+        # Verify users are created
+        user_config = rw_user_proxy.get_config('/user-config')
+        assert user_config
+
+        user_config_test_data = [user.user_name for user in user_config.user if user.user_name in users_test_data]
+        logger.debug('Users: {} have been successfully created'.format(user_config_test_data))
+
+        assert len(user_config_test_data) == len(users_test_data)
+
+    def test_create_projects(self, logger, rw_conman_proxy, rbac_test_data):
+        """Creates all projects as per rbac test-data and verify them."""
+        projects_test_data = rbac_test_data['projects']
+
+        # Create all projects mentioned in projects_test_data and verify if they are created
+        for project in projects_test_data:
+            logger.debug('Creating project {}'.format(project))
+            rift.auto.mano.create_project(rw_conman_proxy, project)
+
+    def test_assign_platform_roles_to_users(self, rbac_platform_proxy, logger, rbac_test_data, user_domain, rw_rbac_int_proxy):
+        """Assign platform roles to an user as per test data mapping and verify them."""
+        platform_roles_test_data, _ = rbac_test_data['roles']
+
+        # Loop through the user & platform-roles mapping and assign roles to the user
+        for user, roles in platform_roles_test_data.items():
+            for role in roles:
+                rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, role, user, user_domain, rw_rbac_int_proxy)
+
+        # Verify if the roles are assigned as per test data mapping
+        platform_config = rbac_platform_proxy.get_config('/rbac-platform-config')
+
+        platform_config_test_data_match = 0
+        logger.debug('Matching platform_roles_test_data with rbac-platform-config')
+        for user in platform_config.user:
+            if user.user_name in platform_roles_test_data:
+                logger.debug('Matched user: {}'.format(user.as_dict()))
+                platform_config_test_data_match += 1
+
+                test_data_user_platform_roles = platform_roles_test_data[user.user_name]
+                assert len(test_data_user_platform_roles) == len(user.role)
+                assert len(test_data_user_platform_roles) == len([role for role in user.role if role.role in test_data_user_platform_roles])
+
+        assert platform_config_test_data_match == len(platform_roles_test_data)
+
+    def test_assign_users_to_projects_roles(self, rbac_test_data, rw_project_proxy, user_domain, rw_rbac_int_proxy):
+        """Assign projects and roles to an user as per test data mapping."""
+        _, project_roles_test_data = rbac_test_data['roles']
+
+        # Loop through the user & (project, role) mapping and asign the project, role to the user
+        for user, project_role_tuple in project_roles_test_data.items():
+            for project, role_list in project_role_tuple:
+                for role in role_list:
+                    rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, user, project, user_domain, rw_rbac_int_proxy)
+
+
+@pytest.mark.depends('rbac_setup')
+@pytest.mark.incremental
+class TestRbacVerification(object):
+    def test_match_rbac_internal(self, mgmt_session, logger, rbac_test_data):
+        """Verifies the test data with rw-rbac-internal"""
+        rbac_intl_proxy = mgmt_session.proxy(RwRbacInternalYang)
+        rbac_intl = rbac_intl_proxy.get('/rw-rbac-internal')
+
+        # Verify users in show rw-rbac-internal
+        users_test_data =  rbac_test_data['users']
+        assert len(rbac_intl.user) == len(users_test_data) + 2   # 'admin', 'oper' are two default users
+        users_match = 0
+        for user in rbac_intl.user:
+            if user.user_name in users_test_data:
+                logger.info('User matched: {}'.format(user.as_dict()))
+                users_match += 1
+        assert users_match == len(users_test_data)
+
+        # Verify roles (only project roles mapping, not the platform roles mapping)
+        # Each role in rw-rbac-internal is associated with a project through the field 'keys'. All mapping from users to project 
+        # is part of project roles mapping.
+        _, project_roles_test_data = rbac_test_data['roles']
+        for user, project_role_tuple in project_roles_test_data.items():
+            for project, role_list in project_role_tuple:
+                for role in role_list:
+                    logger.debug("Matching user: '{}' and its role '{}' in project '{}'".format(user, role, project))
+                    
+                    # Verify there exists a role entry in rw-rbac-internal which matches 'role', 'project'
+                    rbac_intl_role = [role_ for role_ in rbac_intl.role if (role_.role==role and role_.keys==project)]
+
+                    # Each role is identified through its key 'project'. So there can be only one such role which matches 
+                    # the above 'role.role==role and role.keys=project'
+                    assert len(rbac_intl_role) == 1
+                    logger.info('Matched role in rw-rbac-internal: {}'.format(rbac_intl_role[0].as_dict()))
+
+                    # Verify the user list in this rw-rbac-internal role carries 'user'
+                    assert len([user_ for user_ in rbac_intl_role[0].user if user_.user_name==user]) == 1
+
+    def test_role_access(self, logger, session_class, confd_host, rbac_test_data, rbac_user_passwd, project_keyed_xpath):
+        """Verifies the roles assigned to users for a project. Login as each user and verify the user can only access 
+        the projects linked to it."""
+        _, project_roles_test_data = rbac_test_data['roles']
+        projects_test_data = rbac_test_data['projects']
+
+        for user, project_role_tuple in project_roles_test_data.items():
+            logger.debug('Verifying user: {}'.format(user))
+            projects_not_accessible = list(projects_test_data)
+
+            # Establish a session with this current user
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd)
+            print ("Connected using username {} password {}".format(user, rbac_user_passwd))
+
+            rw_project_proxy_ = user_session.proxy(RwProjectYang)
+            
+            if project_role_tuple:  # Skip the for loop for users who are not associated with any project e.g admin3
+                for project, role_list in project_role_tuple:
+                    projects_not_accessible.remove(project)
+                    project_config = rw_project_proxy_.get_config(project_keyed_xpath.format(project_name=quoted_key(project))+'/project-config')
+                    user_ = [user_ for user_ in project_config.user if user_.user_name==user]
+                    logger.debug('User: {}'.format(user_[0].as_dict()))
+                    assert len(user_) == 1
+
+                    # Match the roles for this user
+                    assert set(role_list) == set([role_.role for role_ in user_[0].role])
+
+            # It can't access any other project.
+            for project in projects_not_accessible:
+                assert rw_project_proxy_.get_config(project_keyed_xpath.format(project_name=quoted_key(project))+'/project-config') is None # It should 
+                # return None as the project is not mapped to this user.
+
+    def test_admin_user(self, logger, rw_project_proxy, project_keyed_xpath, rbac_test_data):
+        """Verify admin can see all projects as part of test-data as well as the default project"""
+        projects_test_data = rbac_test_data['projects']
+        projects_test_data = projects_test_data + ('default', )
+
+        # Verify admin user can see all projects including default
+        # If it is post-reboot verification, then check default project should not be listed
+        for project in projects_test_data:
+            project_ = rw_project_proxy.get_config(project_keyed_xpath.format(project_name=quoted_key(project))+'/project-state', list_obj=True)
+            if project=='default' and pytest.config.getoption('--default-project-deleted'):
+                assert project_ is None
+                continue
+            assert project_     # If the project doesn't exist, it returns None
+
+
+@pytest.mark.depends('rbac_setup')
+@pytest.mark.teardown('rbac_setup')
+@pytest.mark.incremental
+class TestRbacTeardown(object):
+    def test_delete_default_project(self, logger, rw_conman_proxy):
+        """Only deletes the default project"""
+        logger.debug('Deleting the default project')
+        rift.auto.mano.delete_project(rw_conman_proxy, 'default')
+    
+    def test_delete_projects(self, logger, rbac_test_data, rw_conman_proxy):
+        """Deletes the projects which are part of rbac test-data and verify their deletion"""
+        projects_test_data = rbac_test_data['projects']
+
+        # Delete the projects
+        for project in projects_test_data:
+            logger.debug('Deleting project {}'.format(project))
+            rift.auto.mano.delete_project(rw_conman_proxy, project)
+
+    def test_delete_users(self, logger, rw_user_proxy, rbac_platform_proxy, platform_config_keyed_xpath, 
+                                    user_keyed_xpath, rbac_test_data, user_domain):
+        """Deletes the users which are part of rbac test-data and verify their deletion"""
+        users_test_data = rbac_test_data['users']
+        map_platform_roles, _ = rbac_test_data['roles']
+
+        # Deletes the users
+        # If an user is associated with a platform role, at first it needs be removed from rbac-platform-config
+        # before deleting it from user-config
+        for user in users_test_data:
+            if user in map_platform_roles:
+                rbac_platform_proxy.delete_config(platform_config_keyed_xpath.format(user=quoted_key(user), domain=quoted_key(user_domain)))
+            rw_user_proxy.delete_config(user_keyed_xpath.format(user=quoted_key(user), domain=quoted_key(user_domain)))
+
+        # Verify if the users are deleted
+        user_config = rw_user_proxy.get_config('/user-config')
+        default_users = [user.user_name for user in user_config.user]
+
+        logger.debug('Default users list: {}'.format(default_users))
+        expected_empty_user_list = [user for user in users_test_data if user in default_users]
+        assert not expected_empty_user_list
diff --git a/rwlaunchpad/ra/pytest/ns/rbac/test_rbac_identity.py b/rwlaunchpad/ra/pytest/ns/rbac/test_rbac_identity.py
new file mode 100644
index 0000000..9d05c37
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/rbac/test_rbac_identity.py
@@ -0,0 +1,505 @@
+#!/usr/bin/env python3
+"""
+#
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+#This file contains the code for RIFT-16314, RIFT-16315, RIFT-16536,
+#RIFT-16537, RIFT-16541, RIFT-16313, RIFT-16692, RIFT-16637, RIFT-16636.
+"""
+import gi
+import pytest
+
+import rift.auto.mano
+
+gi.require_version('RwUserYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwRbacPlatformYang', '1.0')
+gi.require_version('RwRbacInternalYang', '1.0')
+gi.require_version('RwlogMgmtYang', '1.0')
+
+
+from gi.repository import (
+    RwUserYang,
+    RwProjectYang,
+    RwRbacPlatformYang,
+    RwRbacInternalYang,
+    RwlogMgmtYang,
+    RwConmanYang
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+@pytest.mark.setup('rbac_setup')
+@pytest.mark.incremental
+class TestIdentity(object):
+    """Test Identity."""
+
+    platform_role_users = ['platform_user_admin', 'platform_user_oper', 'platform_user_super_admin']
+    platform_users = ['platform_user_admin', 'platform_user_oper', 'platform_user_test', 'platform_user_super_admin']
+
+    project_roles = (
+        'rw-project-mano:catalog-oper', 'rw-project-mano:catalog-admin',
+        'rw-project-mano:lcm-oper', 'rw-project-mano:lcm-admin',
+        'rw-project-mano:account-oper', 'rw-project-mano:account-admin',
+        'rw-project:project-admin', 'rw-project:project-oper'
+    )
+    platform_roles = (
+        'rw-rbac-platform:platform-admin',
+        'rw-rbac-platform:platform-oper',
+        'rw-rbac-platform:super-admin'
+    )
+
+    RBAC_PROJECTS = ['default']
+    RBAC_USERS = []
+
+    TEST_PROJECTS = []
+    TEST_USERS = []
+
+    # This is required so as to track the
+    # already deleted users when creation and deletion
+    # are performed in ad-hoc way.
+    # Checking this set allows us to ignore Proxy request
+    # errors when deletion is performed twice.
+    DELETED_PROJECTS_TRACKER = set()
+
+    INVALID_CREDENTIALS = {
+        'Jason' * 500: 'likeu' * 500
+    }
+
+    POSSIBLY_PROBLEMATIC_CREDENTIALS = {
+        'Ja#son': ['lik#eu', 'syste#m'],
+        'Ja&son': ['lik&eu', 'syste&m'],
+        'J%ason': ['lik%eu', 'syste%m'],
+        'Jåson': ['likeü', 'system'],
+        '<Jason>': ['<likeu>', '<system>'],
+        '/jason': ['/likeu', '/system;'],
+        'jason;': ['likeu;', 'system;'],
+        'j*son': ['like*u;', 'syste*m'],
+        'j@so?': ['l!keu;', 'system!']
+    }
+
+    INAVLID_LOGIN_CREDENTIALS = {
+        'wrong_username': 'mypasswd',
+        'testuser': 0,
+        0: 'mypasswd',
+        0: 0,
+        'wrong_username': 'wrong_password'
+    }
+
+    INVALID_PROJECT_KEYS = ['this_project_doesnt_exist', 'Test01']
+    INVALID_PROJECT_CREATE_KEYS = ['testproject' * 500, ]
+    #POSSIBLY_PROBLEMATIC_KEYS = ['/projectname', 'project name', 'projectname.', 'project,name', 'Projëçt', 'Pro;je:ct', 'Proj*ct', 'Pr@ject']
+    POSSIBLY_PROBLEMATIC_KEYS = ['/projectname', 'project name', 'projectname.', 'project,name', 'Pro;je:ct', 'Proj*ct', 'Pr@ject']
+
+    def test_platform_roles(self, rw_user_proxy, rbac_platform_proxy, rbac_user_passwd, user_domain, session_class, tbac, 
+                                                                        confd_host, platform_roles, rw_rbac_int_proxy):
+        # Setting users and roles up for upcoming checks
+        rift.auto.mano.create_user(rw_user_proxy, 'platform_user_super_admin', rbac_user_passwd, user_domain)
+        rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, 'rw-rbac-platform:super-admin',
+                                                            'platform_user_super_admin', user_domain, rw_rbac_int_proxy)
+        rift.auto.mano.create_user(rw_user_proxy, 'platform_user_admin', rbac_user_passwd, user_domain)
+        rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, 'rw-rbac-platform:platform-admin',
+                                                            'platform_user_admin', user_domain, rw_rbac_int_proxy)
+        rift.auto.mano.create_user(rw_user_proxy, 'platform_user_oper', rbac_user_passwd, user_domain)
+        rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, 'rw-rbac-platform:platform-oper',
+                                                            'platform_user_oper', user_domain, rw_rbac_int_proxy)
+        rift.auto.mano.create_user(rw_user_proxy, 'platform_user_test', rbac_user_passwd, user_domain)
+
+        """Various access tests for platform users"""
+
+        # Testing if platform role users have access to /rbac-platform-config
+        for user in self.platform_role_users:
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd)
+            pxy = user_session.proxy(RwRbacPlatformYang)
+            access_ = pxy.get_config("/rbac-platform-config/user[user-name='platform_user_admin'][user-domain={}]"
+                                .format(quoted_key(user_domain)))
+            assert access_ is not None
+            rift.auto.mano.close_session(user_session)
+
+        # Testing if platform role users have access to /rbac-platform-state
+        for user in self.platform_role_users:
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd)
+            pxy = user_session.proxy(RwRbacPlatformYang)
+            access_ = pxy.get_config("/rbac-platform-state/user[user-name='platform_user_admin'][user-domain={}]"
+                                .format(quoted_key(user_domain)))
+            if user == 'platform_user_oper':
+                    assert access_ is None
+            else:
+                """At the time of writing this code, /rbac-platform-state/user is unpopulated and so the access_ will be None no matter what.
+                In the future when the path /rbac-platform-state/user is populated this test will break. When that happens, just change 
+                the next line to 'access_ is not None'
+                """
+                assert access_ is None
+            rift.auto.mano.close_session(user_session)
+
+        """Changing roles and verifying it """
+
+        # Case 01 Assign and then revoke that role. Assign a second role and see if that sticks and that the older role hasn't stayed on.
+        rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, 'rw-rbac-platform:platform-oper', 
+                                                            'platform_user_test', user_domain, rw_rbac_int_proxy)
+        rift.auto.mano.revoke_platform_role_from_user(rbac_platform_proxy, 'rw-rbac-platform:platform-oper', 
+                                                            'platform_user_test', user_domain)
+        rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, 'rw-rbac-platform:platform-admin', 
+                                                            'platform_user_test', user_domain, rw_rbac_int_proxy)
+        # If the older role didn't stick and the new role did stick (as it should), then the user should be able to change another users password
+        user_session = rift.auto.mano.get_session(session_class, confd_host, 'platform_user_test', rbac_user_passwd)
+        pxy = user_session.proxy(RwUserYang)
+        rift.auto.mano.update_password(pxy, 'platform_user_oper', 'even_newer_password', user_domain, rw_rbac_int_proxy)
+        rift.auto.mano.close_session(user_session)
+
+        # Case 02 Switching the roles back after Case 01
+        rift.auto.mano.revoke_platform_role_from_user(rbac_platform_proxy, 'rw-rbac-platform:platform-admin',
+                                                            'platform_user_test', user_domain)
+        rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, 'rw-rbac-platform:platform-oper',
+                                                            'platform_user_test', user_domain, rw_rbac_int_proxy)
+        # If the older role didn't stick and the new role did stick (as it should), then the user shouldn't be able to change another users password
+        user_session = rift.auto.mano.get_session(session_class, confd_host, 'platform_user_test', rbac_user_passwd)
+        pxy = user_session.proxy(RwUserYang)
+        with pytest.raises(Exception, message="User shouldn't be able to change another user's password") as excinfo:
+            rift.auto.mano.update_password(pxy, 'platform_user_oper', 'new_password', user_domain, rw_rbac_int_proxy)
+        rift.auto.mano.close_session(user_session)
+
+        if not tbac:
+            """Disabling and enabling users and verifying it"""
+
+            rift.auto.mano.create_user(rw_user_proxy, 'disabled_user', rbac_user_passwd, user_domain)
+            rift.auto.mano.update_password(rw_user_proxy, 'platform_user_oper', rbac_user_passwd, user_domain, rw_rbac_int_proxy)
+            # Checking if the disabled user can login
+            rift.auto.mano.disable_user(rw_user_proxy, 'disabled_user', user_domain, rw_rbac_int_proxy)
+            with pytest.raises(Exception, message="User shouldn't be able to login as he is disabled") as excinfo:
+                user_session = rift.auto.mano.get_session(session_class, confd_host, 'disabled_user', rbac_user_passwd, timeout=5)
+            # Checking if he can login after he has been enabled back on.
+            rift.auto.mano.enable_user(rw_user_proxy, 'disabled_user', user_domain, rw_rbac_int_proxy)
+            user_session = rift.auto.mano.get_session(session_class, confd_host, 'disabled_user', rbac_user_passwd)
+            rift.auto.mano.close_session(user_session)
+            # All platform roles trying to change the status of a user
+            for user in self.platform_role_users:
+                user_session = rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd)
+                pxy = user_session.proxy(RwUserYang)
+                if user == 'platform_user_oper':
+                    with pytest.raises(Exception, message="Platform oper shouldn't be able to disable other users") as excinfo:
+                        rift.auto.mano.disable_user(pxy, 'disabled_user', user_domain, rw_rbac_int_proxy)
+                else:
+                    rift.auto.mano.disable_user(pxy, 'disabled_user', user_domain, rw_rbac_int_proxy)
+                    rift.auto.mano.enable_user(pxy, 'disabled_user', user_domain, rw_rbac_int_proxy)
+                rift.auto.mano.close_session(user_session)
+
+            # Testing if users can change their own passwords
+            for user in self.platform_users:
+                user_session = rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd)
+                pxy = user_session.proxy(RwUserYang)
+                rift.auto.mano.update_password(pxy, user, 'new_password', user_domain, rw_rbac_int_proxy)
+                rift.auto.mano.close_session(user_session)
+
+            # Testing if platform role users can change the password of another user
+            for idx, user in enumerate(self.platform_role_users, 1):
+                user_session = rift.auto.mano.get_session(session_class, confd_host, user, 'new_password')
+                pxy = user_session.proxy(RwUserYang)
+                if user == 'platform_user_oper':
+                    with pytest.raises(Exception, message="User shouldn't be able to change another user's password") as excinfo:
+                        rift.auto.mano.update_password(pxy, 'platform_user_test', 'even_newer_password_{}'.format(idx), user_domain, rw_rbac_int_proxy)
+                else:
+                    rift.auto.mano.update_password(pxy, 'platform_user_test', 'even_newer_password_{}'.format(idx), user_domain, rw_rbac_int_proxy)
+                rift.auto.mano.close_session(user_session)
+
+            # Testing if platform users have access to logging
+            for user in self.platform_role_users:
+                user_session = rift.auto.mano.get_session(session_class, confd_host, user, 'new_password')
+                pxy = user_session.proxy(RwlogMgmtYang)
+                access_ = pxy.get_config('/logging')
+                assert access_ is not None
+                rpc_input = RwlogMgmtYang.YangInput_RwlogMgmt_ShowLogs.from_dict({'all': 'None'})
+                pxy.rpc(rpc_input)
+                rpc_input_1 = RwlogMgmtYang.YangInput_RwlogMgmt_LogEvent.from_dict({'on': 'None'})
+                pxy.rpc(rpc_input_1)
+                rift.auto.mano.close_session(user_session)
+
+    def rbac_internal_check(self, mgmt_session, xpath):
+
+        rbac_intl_proxy = mgmt_session.proxy(RwRbacInternalYang)
+        rbac_intl_proxy.wait_for(xpath, "active", timeout=5)
+
+    def test_rbac_internal_verification(self, rw_user_proxy, rw_conman_proxy, rbac_user_passwd, user_domain, mgmt_session, 
+                                                                rw_project_proxy, rbac_platform_proxy, rw_rbac_int_proxy):
+        """Doing various tasks and verifying if rbac-internal is reflecting these changes properly"""
+
+        # Creating projects and users for verifying the rbac-internal scenario
+        for idx in range(1, 4):
+            project_name = 'rbac_project_{}'.format(idx)
+            rift.auto.mano.create_project(rw_conman_proxy, project_name)
+            self.RBAC_PROJECTS.append(project_name)
+
+            if project_name in self.DELETED_PROJECTS_TRACKER:
+                self.DELETED_PROJECTS_TRACKER.remove(project_name)
+
+        for idx in range(1, 5):
+            rift.auto.mano.create_user(rw_user_proxy, 'rbac_user_{}'.format(idx), rbac_user_passwd, user_domain)
+            self.RBAC_USERS.append('rbac_user_{}'.format(idx))
+
+        # Rbac-Internal Verification
+        project_order = [0, 1, 2, 3, 0]
+        xpath = '/rw-rbac-internal/role[role={role}][keys={project}]/user[user-name={user}][user-domain={domain}]/state-machine/state'
+        # Assigning four users to four projects with two project roles for each user and checking the rbac-internal
+        for idx in range(0, 4):
+            fdx = project_order[idx]
+            ldx = project_order[idx + 1]
+            role = self.project_roles[2 * idx]
+            role1 = self.project_roles[(2 * idx) + 1]
+            rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, self.RBAC_USERS[idx],
+                                                    self.RBAC_PROJECTS[fdx], user_domain, rw_rbac_int_proxy)
+            self.rbac_internal_check(mgmt_session, xpath.format(role=quoted_key(role), project=quoted_key(self.RBAC_PROJECTS[fdx]),
+                                                    user=quoted_key(self.RBAC_USERS[idx]), domain=quoted_key(user_domain)))
+            rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role1, self.RBAC_USERS[idx],
+                                                    self.RBAC_PROJECTS[ldx], user_domain, rw_rbac_int_proxy)
+            self.rbac_internal_check(mgmt_session, xpath.format(role=quoted_key(role1), project=quoted_key(self.RBAC_PROJECTS[ldx]),
+                                                    user=quoted_key(self.RBAC_USERS[idx]), domain=quoted_key(user_domain)))
+        # Deleting the four projects and then checking rw-rbac-internal
+        for project_name in self.RBAC_PROJECTS:
+            rift.auto.mano.delete_project(rw_conman_proxy, project_name)
+            print ("Deleting project: {}".format(project_name))
+            self.DELETED_PROJECTS_TRACKER.add(project_name)
+
+        for idx in range(0, 4):
+            fdx = project_order[idx]
+            ldx = project_order[idx + 1]
+            role = self.project_roles[2 * idx]
+            role1 = self.project_roles[(2 * idx) + 1]
+
+            with pytest.raises(Exception, message="This user {} (with this role {} and project {}) shouldn't be on rbac-internal."
+                                        .format(self.RBAC_USERS[idx], role, self.RBAC_PROJECTS[fdx])) as excinfo:
+                self.rbac_internal_check(mgmt_session, xpath.format(role=quoted_key(role), project=quoted_key(self.RBAC_PROJECTS[fdx]),
+                                        user=quoted_key(self.RBAC_USERS[idx]), domain=quoted_key(user_domain)))
+            with pytest.raises(Exception, message="This user {} (with this role {} and project {}) shouldn't be on rbac-internal."
+                                        .format(self.RBAC_USERS[idx], role1, self.RBAC_PROJECTS[ldx])) as excinfo:
+                self.rbac_internal_check(mgmt_session, xpath.format(role=quoted_key(role1), project=quoted_key(self.RBAC_PROJECTS[ldx]),
+                                        user=quoted_key(self.RBAC_USERS[idx]), domain=quoted_key(user_domain)))
+
+    def test_roles_revoke(self, rw_conman_proxy, rw_user_proxy, rbac_platform_proxy, rw_project_proxy, 
+                                                                    rbac_user_passwd, user_domain, rw_rbac_int_proxy):
+        """Assigning all the roles and then revoking them"""
+
+        # Creating users and assigning each of them a role
+        rift.auto.mano.create_project(rw_conman_proxy, 'test01')
+        for incrementor, role in enumerate(self.project_roles + self.platform_roles, 1):
+            user_name = 'test_user_{}'.format(incrementor)
+            rift.auto.mano.create_user(rw_user_proxy, user_name, rbac_user_passwd, user_domain)
+
+            if 'platform' in role:
+                rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, role, user_name, user_domain, rw_rbac_int_proxy)
+            else:
+
+                rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, user_name, 'test01', user_domain, rw_rbac_int_proxy)
+
+        # Removing the assigned roles from each user
+        for incrementor, role in enumerate(self.project_roles + self.platform_roles, 1):
+            user_name = 'test_user_{}'.format(incrementor)
+            if 'platform' in role:
+                rift.auto.mano.revoke_platform_role_from_user(rbac_platform_proxy, role, user_name, user_domain)
+                rift.auto.mano.revoke_user_from_platform_config(rbac_platform_proxy, user_name, user_domain)
+            else:
+                rift.auto.mano.revoke_project_role_from_user(rw_project_proxy, role, user_name, 'test01', user_domain)
+
+    def test_misbehaviours(
+            self, rw_user_proxy, rbac_user_passwd, user_domain,
+            session_class, confd_host, tbac, rw_rbac_int_proxy):
+        """Verify if bad credentials can cause any problems."""
+        rift.auto.mano.create_user(
+            rw_user_proxy, 'testuser', rbac_user_passwd, user_domain)
+        # Trying to login with an incorrect password multiple times
+        counter = 1
+        while(counter < 4):
+            with pytest.raises(
+                Exception,
+                message="User was able to login with the wrong password"
+            ):
+                rift.auto.mano.get_session(
+                    session_class, confd_host, 'testuser', 'wrong_password',
+                    timeout=5)
+            counter += 1
+
+        # Trying to login with INAVLID_LOGIN_CREDENTIALS
+        for uname, passwd in self.INAVLID_LOGIN_CREDENTIALS.items():
+            with pytest.raises(
+                Exception,
+                message="User logged im with invalid login credentials"
+            ):
+                rift.auto.mano.get_session(
+                    session_class, confd_host, uname, passwd, timeout=5)
+        # Creating a user with POSSIBLY_PROBLEMATIC_CREDENTIALS
+        if tbac:
+            for uname, passwd in self.POSSIBLY_PROBLEMATIC_CREDENTIALS.items():
+                rift.auto.mano.create_user(
+                    rw_user_proxy, uname,
+                    passwd[0],
+                    passwd[1]
+                )
+        else:
+            for uname, passwd in self.POSSIBLY_PROBLEMATIC_CREDENTIALS.items():
+                rift.auto.mano.create_user(
+                    rw_user_proxy, uname,
+                    passwd[0],
+                    user_domain
+                )
+        # Creating a user with INVALID_CREDENTIALS
+        for username, password in self.INVALID_CREDENTIALS.items():
+            with pytest.raises(
+                Exception,
+                message="User created with invalid credentials"
+            ):
+                rift.auto.mano.create_user(
+                    rw_user_proxy, username, password, user_domain)
+        # Delete the users created with POSSIBLY_PROBLEMATIC_CREDENTIALS
+        if tbac:
+            for uname, domain in self.POSSIBLY_PROBLEMATIC_CREDENTIALS.items():
+                rift.auto.mano.delete_user(
+                    rw_user_proxy, uname,
+                    domain[1]
+                )
+        else:
+            for uname, passwd in self.POSSIBLY_PROBLEMATIC_CREDENTIALS.items():
+                rift.auto.mano.delete_user(
+                    rw_user_proxy, uname, user_domain
+                )
+
+    def test_project_keys(
+            self, rw_project_proxy, rbac_user_passwd, session_class,
+            confd_host):
+        """Trying to access/create various projects with bad project keys."""
+        # Checking if INVALID_PROJECT_KEYS can be accessed.
+        for project_name in self.INVALID_PROJECT_KEYS:
+            project_cm_config_xpath = '/project[name={project_name}]/project-state'
+            project_ = rw_project_proxy.get_config(
+                project_cm_config_xpath.format(
+                    project_name=quoted_key(project_name)
+                ),
+                list_obj=True
+            )
+            assert project_ is None
+        # Trying to create projects with INVALID_PROJECT_CREATE_KEYS
+        for project_name in self.INVALID_PROJECT_CREATE_KEYS:
+            with pytest.raises(
+                Exception,
+                message="Project created with the INVALID_PROJECT_CREATE_KEYS"
+            ):
+                rift.auto.mano.create_project(rw_conman_proxy, project_name)
+        # These POSSIBLY_PROBLEMATIC_KEYS should not cause any error in theory.
+        for project_name in self.POSSIBLY_PROBLEMATIC_KEYS:
+            rift.auto.mano.create_project(rw_project_proxy, project_name)
+        # User trying to access a project he has no access to.
+        user_session = rift.auto.mano.get_session(
+            session_class, confd_host, 'test_user_11', rbac_user_passwd)
+        pxy = user_session.proxy(RwConmanYang)
+        project_ = pxy.get_config(
+            project_cm_config_xpath.format(
+                project_name=quoted_key('test01')
+            )
+        )
+        assert project_ is None
+        rift.auto.mano.close_session(user_session)
+
+    def test_project_testing(self, rw_conman_proxy, rw_user_proxy, rw_project_proxy, rbac_user_passwd, user_domain, rw_rbac_int_proxy):
+        """Multiple projects creation, deletion, re-addition with verification every step of the way"""
+
+        # Creating projects and users for this test case
+        for idx in range(1,5):
+            project_name = 'testing_project_{}'.format(idx)
+            rift.auto.mano.create_project(rw_conman_proxy, project_name)
+            self.TEST_PROJECTS.append(project_name)
+            if project_name in self.DELETED_PROJECTS_TRACKER:
+                self.DELETED_PROJECTS_TRACKER.remove(project_name)
+
+        for idx in range(1,9):
+            rift.auto.mano.create_user(rw_user_proxy, 'testing_user_{}'.format(idx), rbac_user_passwd, user_domain)
+            self.TEST_USERS.append('testing_user_{}'.format(idx))
+
+        # Assigning project roles to users
+        for idx in range(0,8):
+            role = self.project_roles[idx]
+            rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, self.TEST_USERS[idx], 
+                                                    self.TEST_PROJECTS[idx//2], user_domain, rw_rbac_int_proxy)
+
+        # Deleting all test projects
+        for project_name in self.TEST_PROJECTS:
+            rift.auto.mano.delete_project(rw_conman_proxy, project_name)
+            self.DELETED_PROJECTS_TRACKER.add(project_name)
+
+        # Recreating all the deleted projects
+        for project_name in self.TEST_PROJECTS:
+            rift.auto.mano.create_project(rw_conman_proxy, project_name)
+            if project_name in self.DELETED_PROJECTS_TRACKER:
+                self.DELETED_PROJECTS_TRACKER.remove(project_name)
+
+        # Check if the recreated projects have the old users assigned to them still.
+        for idx in range(0,8):
+            role = self.project_roles[idx]
+            role_keyed_path = "/project[name={project}]/project-config/user[user-name={user}][user-domain={domain}]/role[role={user_role}]"
+            role_ = rw_project_proxy.get_config(role_keyed_path.format(project=quoted_key(self.TEST_PROJECTS[idx//2]),
+                                                user=quoted_key(self.TEST_USERS[idx]), domain=quoted_key(user_domain), user_role=quoted_key(role)))
+            assert role_ is None, "This user shouldn't exist in this project which was just created"
+
+        # Reassigning the old users to their old roles.
+        for idx in range(0,8):
+            role = self.project_roles[idx]
+            rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, self.TEST_USERS[idx],
+                                                    self.TEST_PROJECTS[idx//2], user_domain, rw_rbac_int_proxy)
+
+
+@pytest.mark.depends('rbac_setup')
+@pytest.mark.teardown('rbac_setup')
+@pytest.mark.incremental
+class TestTeardown(object):
+    """Class Teardown."""
+
+    def test_delete_projects(self, rw_conman_proxy):
+        invalid_projects = TestIdentity.POSSIBLY_PROBLEMATIC_KEYS + ['test01']
+        valid_projects = TestIdentity.TEST_PROJECTS + TestIdentity.RBAC_PROJECTS
+        all_projects = valid_projects + invalid_projects
+
+        for project_name in all_projects:
+            try:
+                rift.auto.mano.delete_project(rw_conman_proxy, project_name)
+            except rift.auto.session.ProxyRequestError as e:
+                if project_name in TestIdentity.DELETED_PROJECTS_TRACKER:
+                    print ("Project {} is already deleted".format(project_name))
+                elif project_name not in invalid_projects:
+                    print ("Failed to delete project: {}".format(project_name))
+                    raise e
+
+    def test_delete_users(self, rw_user_proxy, rbac_platform_proxy, user_domain):
+        users_test_data = ['testuser']
+        for incrementor, role in enumerate(TestIdentity.project_roles + TestIdentity.platform_roles, 1):
+            users_test_data.append('test_user_{}'.format(incrementor))
+
+        for user in TestIdentity.platform_users:
+            users_test_data.append(user)
+
+        # Deletes the users
+        for user in users_test_data+TestIdentity.RBAC_USERS+TestIdentity.TEST_USERS:
+            try:
+                keyed_path = "/rbac-platform-config/user[user-name={user}][user-domain={domain}]"
+                platform_cfg_ent = rbac_platform_proxy.get_config(keyed_path.format(user=quoted_key(user), domain=quoted_key(user_domain)))
+
+                if platform_cfg_ent is not None:
+                    # Delete from the platform-config first.
+                    rift.auto.mano.revoke_user_from_platform_config(rbac_platform_proxy, user, user_domain)
+                rift.auto.mano.delete_user(rw_user_proxy, user, user_domain)
+
+            except rift.auto.session.ProxyRequestError as e:
+                if user not in TestIdentity.INAVLID_LOGIN_CREDENTIALS:
+                    print ("Deletion of user {} failed".format(user))
+                    raise e
+                else:
+                    print ("Expected error deleting invalid user {}".format(user))
diff --git a/rwlaunchpad/ra/pytest/ns/rbac/test_rbac_mano_xpath_access.py b/rwlaunchpad/ra/pytest/ns/rbac/test_rbac_mano_xpath_access.py
new file mode 100644
index 0000000..71e96a9
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/rbac/test_rbac_mano_xpath_access.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python3
+"""
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+"""
+
+import pytest
+import gi
+
+import rift.auto.mano
+import rift.auto.descriptor
+
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwSdnYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwImageMgmtYang', '1.0')
+gi.require_version('RwStagingMgmtYang', '1.0')
+gi.require_version('RwPkgMgmtYang', '1.0')
+
+from gi.repository import (
+    RwProjectNsdYang,
+    RwProjectVnfdYang,
+    RwCloudYang,
+    RwSdnYang,
+    RwLaunchpadYang,
+    RwVnfrYang,
+    RwNsrYang,
+    RwImageMgmtYang,
+    RwStagingMgmtYang,
+    RwPkgMgmtYang,
+)
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+@pytest.fixture(scope='module')
+def mano_xpaths():
+    """All xpaths which need to be accessed by users with various roles"""
+
+    xpaths_dict = {
+        'catalog' : ('/vnfd-catalog', '/nsd-catalog'),
+        'accounts' : ('/cloud', '/sdn'),
+        'records' : ('/vnfr-catalog', '/vnfr-console', '/ns-instance-config', '/ns-instance-opdata'),
+        'pkg-mgmt' : ('/staging-areas', '/upload-jobs', '/copy-jobs', '/download-jobs'), 
+        'config-agent': ('/config-agent',),
+        'ro' : ('/resource-orchestrator',),
+        'datacenter' : ('/datacenters',),
+    }
+    return xpaths_dict
+
+
+@pytest.fixture(scope='module')
+def mano_roles_xpaths_mapping():
+    """Mano roles and its accessible xpaths mapping"""
+    mano_roles_xpaths_mapping_dict = {
+        'rw-project:project-admin': ('catalog', 'accounts', 'records', 'pkg-mgmt', 'config-agent', 'ro', 'datacenter'), 
+        'rw-project:project-oper' : ('catalog', 'accounts', 'records', 'pkg-mgmt', 'config-agent', 'ro', 'datacenter'),  
+        'rw-project-mano:catalog-oper' : ('catalog', 'pkg-mgmt'), 
+        'rw-project-mano:catalog-admin' : ('catalog', 'pkg-mgmt'),  
+        'rw-project-mano:lcm-admin' : ('catalog', 'accounts', 'records', 'config-agent', 'datacenter'), 
+        'rw-project-mano:lcm-oper' : ('records',), 
+        'rw-project-mano:account-admin' : ('accounts', 'config-agent', 'ro', 'datacenter'), 
+        'rw-project-mano:account-oper' : ('accounts', 'config-agent', 'ro', 'datacenter'), 
+    }
+    return mano_roles_xpaths_mapping_dict
+
+
+@pytest.fixture(scope='module')
+def xpath_module_mapping():
+    """Mano Xpaths and its module mapping. Value also carries config or opdata type along with yang-module"""
+    xpath_module_mapping_dict = {
+        ('/vnfd-catalog',): (RwProjectVnfdYang, 'get_config'), 
+        ('/nsd-catalog',): (RwProjectNsdYang, 'get_config'),
+        ('/cloud',): (RwCloudYang, 'get_config'),
+        ('/sdn',): (RwSdnYang, 'get_config'),
+        ('/vnfr-catalog', '/vnfr-console'): (RwVnfrYang, 'get'),
+        ('/ns-instance-config', '/ns-instance-opdata'): (RwNsrYang, 'get'), 
+        ('/upload-jobs', '/download-jobs'): (RwImageMgmtYang, 'get'),
+        ('/copy-jobs', ): (RwPkgMgmtYang, 'get'),
+        ('/staging-areas',): (RwStagingMgmtYang, 'get'),
+        ('/resource-orchestrator', '/datacenters'): (RwLaunchpadYang, None),
+        ('/config-agent',): None,
+    }
+    return xpath_module_mapping_dict
+
+@pytest.mark.setup('mano_xpath_access')
+@pytest.mark.depends('nsr')
+@pytest.mark.incremental
+class TestRbacManoXpathAccess(object):
+    def test_copy_nsd_catalog_item(self, mgmt_session):
+        """Copy a NSD catalog item, so that /copy-jobs xpath can be tested."""
+        nsd_path = '/rw-project:project[rw-project:name="default"]/nsd-catalog'
+        nsd = mgmt_session.proxy(RwProjectNsdYang).get_config(nsd_path)
+        nsd_pkg_id = nsd.nsd[0].id
+        rpc_input = RwPkgMgmtYang.YangInput_RwPkgMgmt_PackageCopy.from_dict(
+            {'package_type': 'NSD', 'package_id': nsd_pkg_id, 'package_name': 'test_nsd_copy',
+             'project_name': 'default'})
+        mgmt_session.proxy(RwPkgMgmtYang).rpc(rpc_input)
+
+    def test_rbac_mano_xpaths_access(self, mano_xpaths, logger, mano_roles_xpaths_mapping, xpath_module_mapping, session_class,
+        project_keyed_xpath, user_domain, rbac_platform_proxy, rw_project_proxy, rbac_user_passwd, confd_host, rw_user_proxy, rw_rbac_int_proxy):
+        """Verify Mano roles/Permission mapping works (Verifies only read access for all Xpaths)."""
+        project_name = 'default'
+
+        # Skipping download-jobs as it is not yet implemented from MANO side.
+        # Others are skipped becuase they need Juju, Openmano configurations etc.
+        skip_xpaths = ('/download-jobs', '/config-agent', '/resource-orchestrator', '/datacenters', '/upload-jobs')
+        
+        for index, (role, xpath_keys_tuple) in enumerate(mano_roles_xpaths_mapping.items()):
+            # Create an user and assign a role 
+            user_name = 'user-{}'.format(index)
+            rift.auto.mano.create_user(rw_user_proxy, user_name, rbac_user_passwd, user_domain)
+            logger.debug('Creating an user {} with role {}'.format(user_name, role))
+            if 'platform' in role:
+                rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, role, user_name, user_domain, rw_rbac_int_proxy)
+            else:
+                rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, user_name, project_name, user_domain, rw_rbac_int_proxy)
+                
+            # Get user session
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user_name, rbac_user_passwd)
+
+            # go through each of its xpaths keys and try to access
+            for xpath_key in xpath_keys_tuple:
+                for xpath in mano_xpaths[xpath_key]:
+                    if xpath in skip_xpaths:
+                        continue
+                    logger.debug('User {} with role {} trying to access xpath {}'.format(user_name, role, xpath))
+                    yang_module, get_type = [yang_module for xpath_tuple, yang_module in xpath_module_mapping.items() 
+                                                                                            if xpath in xpath_tuple][0]
+                    user_pxy = user_session.proxy(yang_module)
+                    get_data_func = getattr(user_pxy, get_type)
+                    assert get_data_func(project_keyed_xpath.format(project_name=quoted_key(project_name))+xpath) 
+
+            # go through remaining xpaths keys which this user-role not part of and try to access; it should fail
+            access_denied_xpath_keys_tuple = set(mano_xpaths.keys()).difference(xpath_keys_tuple)
+            for xpath_key in access_denied_xpath_keys_tuple:
+                for xpath in mano_xpaths[xpath_key]:
+                    if xpath in skip_xpaths:
+                        continue
+                    logger.debug('User {} with role {} trying to access xpath {}. It should get None'.format(user_name, role, xpath))
+                    yang_module, get_type = [yang_module for xpath_tuple, yang_module in xpath_module_mapping.items() 
+                                                                                            if xpath in xpath_tuple][0]
+                    user_pxy = user_session.proxy(yang_module)
+                    get_data_func = getattr(user_pxy, get_type)
+                    assert get_data_func(project_keyed_xpath.format(project_name=quoted_key(project_name))+xpath) is None
diff --git a/rwlaunchpad/ra/pytest/ns/rbac/test_rbac_roles.py b/rwlaunchpad/ra/pytest/ns/rbac/test_rbac_roles.py
new file mode 100644
index 0000000..2e0cb41
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/rbac/test_rbac_roles.py
@@ -0,0 +1,1220 @@
+#!/usr/bin/env python3
+"""
+#
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+"""
+import collections
+import gi
+import pytest
+import random
+import uuid
+
+import rift.auto.mano
+import rift.auto.descriptor
+gi.require_version('RwUserYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwlogMgmtYang', '1.0')
+from gi.repository import (
+    RwUserYang,
+    RwProjectYang,
+    RwConmanYang,
+    RwProjectVnfdYang,
+    RwProjectNsdYang,
+    RwNsrYang,
+    RwVnfrYang,
+    RwVlrYang,
+    RwRbacPlatformYang,
+    RwlogMgmtYang,
+    RwRedundancyYang,
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+SESSION_CONNECT_TIMEOUT=5
+
+@pytest.fixture(scope='session')
+def user_test_roles():
+    """Returns tuples of roles which enable an user to delete/create a new user"""
+    write_roles = ('rw-rbac-platform:super-admin', 'rw-rbac-platform:platform-admin')
+    read_roles = tuple()
+    return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def project_test_roles():
+    """Returns tuples of roles which enable an user to create, read, delete a project"""
+    write_roles = ('rw-rbac-platform:super-admin', )
+    read_roles = ('rw-project:project-oper', 'rw-project:project-admin')
+    return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def onboarding_test_roles():
+    """Fixture that returns a tuple of roles which enable an user to onboard/modify/delete a VNF/NS package"""
+    write_roles = ('rw-rbac-platform:super-admin', 'rw-project-mano:catalog-admin', 'rw-project:project-admin')
+    read_roles = ('rw-project-mano:catalog-oper', 'rw-project-mano:lcm-admin')
+    return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def account_test_roles():
+    """Fixture that returns a tuple of roles which enable an user to CRUD a VIM, Sdn account"""
+    write_roles = ('rw-rbac-platform:super-admin', 'rw-project-mano:account-admin', 'rw-project:project-admin')
+    read_roles = ('rw-project-mano:account-oper', 'rw-project-mano:lcm-admin')
+    return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def ns_instantiate_test_roles():
+    """Fixture that returns a tuple of roles which enable an user to instantiate/terminate a NS
+    Read roles: who all can access vnfr-catalog, vnfr-console, ns-instance-opdata etc"""
+    write_roles = ('rw-rbac-platform:super-admin', 'rw-project-mano:lcm-admin', 'rw-project:project-admin')
+    read_roles = ('rw-project-mano:lcm-oper', )
+    return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def syslog_server_test_roles():
+    """Fixture that returns a tuple of roles which enable an user set the syslog server_address"""
+    write_roles = ('rw-rbac-platform:super-admin', 'rw-rbac-platform:platform-admin', 'rw-rbac-platform:platform-oper')
+    read_roles = tuple()
+    return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def redundancy_config_test_roles():
+    """Fixture that returns a tuple of roles which enable an user set the syslog server_address"""
+    write_roles = ('rw-rbac-platform:super-admin', 'rw-rbac-platform:platform-admin')
+    read_roles =  ('rw-rbac-platform:platform-oper', )
+    return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def project_acessible():
+    """Fixture that returns name of the project to which all new users will be associated"""
+    return random.choice(['project1', 'default'])
+
+
+# @pytest.fixture(scope='session')
+# def project_not_accessible():
+#   """Retruns name of the project whose users are not supposed to access the resources under project 'project_acessible'"""
+#   return 'project2'
+
+
+@pytest.fixture(scope='session')
+def users_test_data(rw_user_proxy, rbac_platform_proxy, rw_project_proxy, all_roles, user_test_roles, project_test_roles,
+    onboarding_test_roles, account_test_roles, ns_instantiate_test_roles, user_domain, project_acessible, rw_conman_proxy,
+    syslog_server_test_roles, all_roles_combinations, rw_rbac_int_proxy, tbac, redundancy_config_test_roles):
+    """Creates new users required for a test and assign appropriate roles to them"""
+    if pytest.config.getoption("--user-creation-test"):
+        test_roles = user_test_roles
+    elif pytest.config.getoption("--project-creation-test"):
+        test_roles = project_test_roles
+    elif pytest.config.getoption("--onboarding-test"):
+        test_roles = onboarding_test_roles
+    elif pytest.config.getoption("--account-test"):
+        test_roles = account_test_roles
+    elif pytest.config.getoption("--nsr-test"):
+        test_roles = ns_instantiate_test_roles
+    elif pytest.config.getoption("--syslog-server-test"):
+        test_roles = syslog_server_test_roles
+    elif pytest.config.getoption("--redundancy-role-test"):
+        test_roles = redundancy_config_test_roles
+
+    # Create a project to which these users will be part of
+    if project_acessible != 'default':
+        rift.auto.mano.create_project(rw_conman_proxy, project_acessible)
+
+    def create_user_assign_role(user_name, password, role_set):
+        rift.auto.mano.create_user(rw_user_proxy, user_name, password, user_domain)
+        project_roles_list, platform_roles_list = [], []
+        for role in role_set:
+            if 'platform' in role:
+                platform_roles_list.append(role)
+            else:
+                project_roles_list.append(role)
+        if platform_roles_list:
+            rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, platform_roles_list, user_name, user_domain, rw_rbac_int_proxy)
+        if project_roles_list:
+            rift.auto.mano.assign_project_role_to_user(rw_project_proxy, project_roles_list, user_name,
+                                                       project_acessible, user_domain, rw_rbac_int_proxy)
+
+    write_roles, read_roles = test_roles
+    fail_roles = [role for role in all_roles if role not in write_roles]
+
+    if False: #If its desired to run checks for all combinations, tbd on what option this will be enabled
+        write_roles_tmp, read_roles_tmp, fail_roles_tmp = [], [], []
+        for role_combination in all_roles_combinations:
+            if bool(set(role_combination).intersection(write_roles)):
+                write_roles_tmp.append(role_combination)
+                continue
+            if bool(set(role_combination).intersection(read_roles)):
+                read_roles_tmp.append(role_combination)
+                continue
+            if bool(set(role_combination).isdisjoint(write_roles)):
+                fail_roles_tmp.append(role_combination)
+        write_roles, read_roles, fail_roles = write_roles_tmp, read_roles_tmp, fail_roles_tmp
+
+    # Create the users with roles mapped
+    write_users, read_users, fail_users = dict(), dict(), dict()
+    for idx, role_set in enumerate(write_roles, 1):
+        if type(role_set) is str:
+            role_set = [role_set]
+        user_name = 'write-{}'.format(idx)
+        if tbac:
+            password=user_name
+        else:
+            password = rift.auto.mano.generate_password()
+        create_user_assign_role(user_name, password, role_set)
+        write_users[user_name] = (role_set, password)
+
+    for idx, role_set in enumerate(read_roles, 1):
+        if type(role_set) is str:
+            role_set = [role_set]
+        user_name = 'read-{}'.format(idx)
+        if tbac:
+            password=user_name
+        else:
+            password = rift.auto.mano.generate_password()
+        create_user_assign_role(user_name, password, role_set)
+        read_users[user_name] = (role_set, password)
+
+    for idx, role_set in enumerate(fail_roles, 1):
+        if type(role_set) is str:
+            role_set = [role_set]
+        user_name = 'fail-{}'.format(idx)
+        if tbac:
+            password=user_name
+        else:
+            password = rift.auto.mano.generate_password()
+        create_user_assign_role(user_name, password, role_set)
+        fail_users[user_name] = (role_set, password)
+    return write_users, read_users, fail_users
+
+
+@pytest.mark.setup('test_rbac_roles_setup')
+@pytest.mark.incremental
+class TestRbacVerification(object):
+    @pytest.mark.skipif(not pytest.config.getoption("--project-creation-test"), reason="need --project-creation-test option to run")
+    def test_project_create_delete_authorization(self, logger, users_test_data, session_class, confd_host, rw_conman_proxy,
+                                                        project_keyed_xpath, project_acessible):
+        """Verifies only users with certain roles can create/delete a project"""
+
+        write_users, read_users, fail_users = users_test_data
+
+        # Check users in write_users dict able to create/delete a project
+        logger.debug('Verifying users which are authorised to create/delete a project')
+        for user in write_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, write_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, write_users[user][1])
+            pxy = user_session.proxy(RwProjectYang)
+
+            project_name = 'project-{}'.format(user)
+            logger.debug('Trying to create project {}'.format(project_name))
+            rift.auto.mano.create_project(pxy, project_name)
+
+            logger.debug('Trying to delete project {}'.format(project_name))
+            rift.auto.mano.delete_project(pxy, project_name)
+
+            rift.auto.mano.close_session(user_session)
+
+        # Check users in read_users dict able to read a project
+        logger.debug('Verifying users which are authorised to read a project')
+        for user in read_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, read_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, read_users[user][1])
+            pxy = user_session.proxy(RwProjectYang)
+
+            logger.debug('User {} trying to read project {}'.format(user, project_acessible))
+            project_ = pxy.get_config(project_keyed_xpath.format(project_name=quoted_key(project_acessible))+'/project-state', list_obj=True)
+            assert project_
+
+            rift.auto.mano.close_session(user_session)
+
+        # Check users in fail_users dict shouldn't be allowed to create a project or delete a project
+
+        # 'project-admin' user not able to create a project, but can delete a project, hence do the create/delete
+        # operation for this user at the end
+        fail_users_reordered = collections.OrderedDict()
+        for user, role_passwd_tuple in fail_users.items():
+            if any('project-admin' in role for role in role_passwd_tuple[0]):
+                project_admin_key, project_admin_val = user, role_passwd_tuple
+                continue
+            fail_users_reordered[user] = role_passwd_tuple
+        fail_users_reordered[project_admin_key] = project_admin_val
+
+        logger.debug('Verifying users which are not supposed to create/delete a project')
+        for user in fail_users_reordered:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users_reordered[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users_reordered[user][1])
+            pxy = user_session.proxy(RwProjectYang)
+
+            project_name = 'project-{}'.format(user)
+
+            with pytest.raises(Exception, message='User {} not authorised to create project {}'.format(
+                                                        user, project_name)) as excinfo:
+                logger.debug('User {} trying to create project {}'.format(user, project_name))
+                rift.auto.mano.create_project(pxy, project_name)
+
+            logger.debug('User {} trying to delete project {}'.format(user, project_acessible))
+            if any('project-admin' in role for role in fail_users_reordered[user][0]):
+                rift.auto.mano.delete_project(pxy, project_acessible)
+                continue
+            with pytest.raises(Exception, message='User {} not authorised to delete project {}'.format(
+                                                        user, project_acessible)) as excinfo:
+                rift.auto.mano.delete_project(pxy, project_acessible)
+
+            rift.auto.mano.close_session(user_session)
+
+    def delete_user_from_project(
+            self, project_proxy, target_user, target_project, user_domain):
+        project_xpath = (
+            "/project[name={project}]/project-config/user" +
+            "[user-name={user}][user-domain={domain}]"
+        )
+        # Check if the user exists for the project
+        ret_val = project_proxy.get_config(
+            project_xpath.format(
+                project=quoted_key(target_project),
+                user=quoted_key(target_user),
+                domain=quoted_key(user_domain)))
+        
+        assert ret_val
+        # Delete the target_user from the target_project
+        project_proxy.delete_config(
+            project_xpath.format(
+                project=quoted_key(target_project),
+                user=quoted_key(target_user),
+                domain=quoted_key(user_domain))
+        )
+        # Verify that he is deleted
+        ret_val = project_proxy.get_config(
+            project_xpath.format(
+                project=quoted_key(target_project),
+                user=quoted_key(target_user),
+                domain=quoted_key(user_domain))
+        )
+        assert ret_val is None
+
+    @pytest.mark.skipif(
+        not pytest.config.getoption("--project-creation-test"),
+        reason="need --project-creation-test option to run")
+    def test_project_admin_users_role_authorization(
+            self, logger, user_roles, rw_user_proxy, session_class,
+            user_domain, confd_host, rw_conman_proxy, project_keyed_xpath,
+            rw_project_proxy, rw_rbac_int_proxy, tbac):
+        """Verify project admin & oper role operations on a single project."""
+        logger.debug(
+            "Create a project & 8 users each with its own project/mano role")
+        rift.auto.mano.create_project(rw_conman_proxy, 'project-vzw')
+        project_user_data = {}
+        for idx, role in enumerate(user_roles, 1):
+            user_name = 'project_vzw_user-{}'.format(idx)
+            if not tbac:
+                password = rift.auto.mano.generate_password()
+            else:
+                password = user_name
+            rift.auto.mano.create_user(
+                rw_user_proxy, user_name, password, user_domain)
+            rift.auto.mano.assign_project_role_to_user(
+                rw_project_proxy, role, user_name, 'project-vzw',
+                user_domain, rw_rbac_int_proxy)
+            project_user_data[user_name] = {"role": role, "password": password}
+            if "project-admin" in role:
+                project_admin_user = user_name
+
+        logger.debug("Project admin deleting roles from users.")
+        project_admin_session = rift.auto.mano.get_session(
+            session_class, confd_host, project_admin_user,
+            project_user_data[project_admin_user]["password"])
+        project_admin_proxy = project_admin_session.proxy(RwProjectYang)
+        for user in project_user_data:
+            role = project_user_data[user]["role"]
+            if project_admin_user == user:
+                continue
+            rift.auto.mano.revoke_project_role_from_user(
+                project_admin_proxy, role, user, 'project-vzw', user_domain)
+        rift.auto.mano.close_session(project_admin_session)
+
+        logger.debug("Verify project admin can assign another role to users")
+        project_admin_session = rift.auto.mano.get_session(
+            session_class, confd_host, project_admin_user,
+            project_user_data[project_admin_user]["password"])
+        project_admin_proxy = project_admin_session.proxy(RwProjectYang)
+        for user in project_user_data:
+            role = 'rw-project:project-oper'
+            if project_admin_user == user:
+                continue
+            rift.auto.mano.assign_project_role_to_user(
+                project_admin_proxy, role, user, 'project-vzw',
+                user_domain, rw_rbac_int_proxy)
+            rift.auto.mano.close_session(project_admin_session)
+
+        # Verify the user able to read project
+        for user in project_user_data:
+            user_session = rift.auto.mano.get_session(
+                session_class, confd_host, user,
+                project_user_data[user]["password"])
+            user_project_pxy = user_session.proxy(RwProjectYang)
+            logger.debug("verifying user able to read project")
+            xpath = "/project[name={project}]/project-config"
+            ret_val = user_project_pxy.get_config(
+                xpath.format(project=quoted_key('project-vzw')))
+            assert ret_val
+            rift.auto.mano.close_session(user_session)
+
+        logger.debug("Verify if project admin can replace roles for users")
+        project_admin_session = rift.auto.mano.get_session(
+            session_class, confd_host, project_admin_user,
+            project_user_data[project_admin_user]["password"])
+        project_admin_proxy = project_admin_session.proxy(RwProjectYang)
+        for user in project_user_data:
+            if project_admin_user != user:
+                xpath = (
+                    "/project[name={project}]/project-config/user" +
+                    "[user-name={user}][user-domain={domain}]")
+                new_role = (
+                    RwProjectYang.
+                    YangData_RwProject_Project_ProjectConfig_User_Role.
+                    from_dict({
+                        'role': 'rw-project-mano:account-admin'})
+                )
+                project_admin_proxy.replace_config(
+                    xpath.format(
+                        project=quoted_key('project-vzw'),
+                        user=quoted_key(user),
+                        domain=quoted_key(user_domain)), new_role)
+                ret_val = project_admin_proxy.get_config(
+                    xpath.format(
+                        project=quoted_key('project-vzw'),
+                        user=quoted_key(user),
+                        domain=quoted_key(user_domain),
+                        role=quoted_key('rw-project-mano:lcm-oper')))
+                assert ret_val
+            rift.auto.mano.close_session(project_admin_session)
+
+        logger.debug("Verify if users able to change its own user details")
+        for user in project_user_data:
+            if tbac:
+                break
+            password = project_user_data[user]["password"]
+            user_session = rift.auto.mano.get_session(
+                session_class, confd_host, user, password)
+            user_proxy = user_session.proxy(RwUserYang)
+            rift.auto.mano.update_password(
+                user_proxy, user, user, user_domain, rw_rbac_int_proxy)
+            project_user_data[user]["new_password"] = user
+            rift.auto.mano.close_session(user_session)
+
+            logger.debug(
+                "{} trying to connect ".format(user) +
+                "with its old password {}".format(password)
+            )
+
+            message = ('{} not supposed to '.format(user) +
+                       'log-in with old passwd {}'.format(password))
+            with pytest.raises(Exception, message=message):
+                rift.auto.mano.get_session(
+                    session_class, confd_host, user,
+                    password, timeout=SESSION_CONNECT_TIMEOUT)
+
+            # Verify the user should be able to log-in with new password
+            logger.debug(
+                "User {} trying to log-in with its updated password {}".format(
+                    user, project_user_data[user]["new_password"]))
+
+            usession_updated_passwd = rift.auto.mano.get_session(
+                session_class, confd_host, user,
+                project_user_data[user]["new_password"])
+
+        # project admin able to delete users from the project database
+        if tbac:
+            password = project_user_data[project_admin_user]["password"]
+        else:
+            password = project_user_data[project_admin_user]["new_password"]
+        project_admin_session = rift.auto.mano.get_session(
+            session_class, confd_host, project_admin_user, password)
+        project_admin_proxy = project_admin_session.proxy(RwProjectYang)
+
+        for user in project_user_data:
+            if user == project_admin_user:
+                continue
+            logger.debug('deleting user {} from project project-vzw'.format(user))
+            self.delete_user_from_project(
+                project_admin_proxy, user, 'project-vzw', user_domain)
+            rift.auto.mano.close_session(project_admin_session)
+
+    @pytest.mark.skipif(
+        not pytest.config.getoption("--project-creation-test"),
+        reason="need --project-creation-test option to run")
+    def test_multi_project_multi_users_role_authorization(
+            self, logger, user_roles, rw_user_proxy, session_class,
+            user_domain, confd_host, rw_conman_proxy, project_keyed_xpath,
+            rw_project_proxy, rw_rbac_int_proxy, tbac, rbac_user_passwd):
+        """Verify that users with roles doesn't have unauthorized access."""
+        """
+        Case 01. rbac_user2 has different roles in project1 and project2.
+        Case 02. rbac_user4 has project-admin in project3 and project4.
+        Case 03. rbac_user9 has project-oper in project5 and project6.
+        """
+
+        # The sample user data
+        role1 = 'rw-project:project-admin'
+        role2 = 'rw-project:project-oper'
+        project_user_data = {
+            "project1": {
+                "rbac_user1": role1,
+                "rbac_user2": role2,
+            },
+            "project2": {
+                "rbac_user2": role1,
+                "rbac_user3": role2,
+            },
+            "project3": {
+                "rbac_user4": role1,
+                "rbac_user5": role2,
+
+            },
+            "project4": {
+                "rbac_user4": role1,
+                "rbac_user6": role2,
+            },
+            "project5": {
+                "rbac_user7": role1,
+                "rbac_user9": role2,
+            },
+            "project6": {
+                "rbac_user8": role1,
+                "rbac_user9": role2,
+            }
+        }
+        # Create projects
+        for idx in range(1, 7):
+            rift.auto.mano.create_project(
+                rw_conman_proxy, 'project{}'.format(idx))
+        # Create users
+        for idx in range(1, 10):
+            rift.auto.mano.create_user(
+                rw_user_proxy, 'rbac_user{}'.format(idx),
+                rbac_user_passwd, user_domain)
+        # Assign roles to users according to the project_user_data
+        for idx in range(1, 7):
+            project = 'project{}'.format(idx)
+            for user_name, role in project_user_data[project].items():
+                rift.auto.mano.assign_project_role_to_user(
+                    rw_project_proxy, role, user_name, project,
+                    user_domain, rw_rbac_int_proxy)
+
+        def project_access(
+                user_name, target_project, session_class,
+                confd_host, logger):
+            """Verify if user has access to target project."""
+            password = rbac_user_passwd
+            if tbac:
+                password = user_name
+            user_session = rift.auto.mano.get_session(
+                session_class, confd_host, user_name, password)
+            logger.debug("{} trying to access {}".format(
+                user_name, target_project) +
+                "/project-state"
+            )
+            pxy = user_session.proxy(RwProjectYang)
+            # Verify is user has access to /project
+            project_xpath = '/project[name={}]/project-state'.format(
+                quoted_key(target_project)
+            )
+            response = pxy.get_config(project_xpath, list_obj=True)
+            assert response
+            # Verify is user has access to /project/project-config/user
+            project_user_xpath = (
+                "/project[name={project}]/project-config/" +
+                "user[user-name={user}][user-domain={domain}]"
+            )
+            target_user = list(project_user_data[target_project].keys())[0]
+            pxy = user_session.proxy(RwProjectYang)
+            response = pxy.get_config(
+                project_user_xpath.format(
+                    project=quoted_key(target_project),
+                    user=quoted_key(target_user),
+                    domain=quoted_key(user_domain)
+                )
+            )
+            assert response
+            rift.auto.mano.close_session(user_session)
+
+        # Case 01. rbac_user2 has different roles in project1 and project2.
+
+        logger.debug('Veryfy rbac_user1 of project1 has no access to project2')
+        with pytest.raises(
+                Exception,
+                message="rbac_user1 accessed project2 which its not part of."):
+            project_access(
+                'rbac_user1', 'project2', session_class, confd_host, logger)
+
+        logger.debug('Verify rbac_user2 has access to project1 and project2')
+        project_access(
+            'rbac_user2', 'project1', session_class, confd_host, logger)
+        project_access(
+            'rbac_user2', 'project2', session_class, confd_host, logger)
+
+        # Case 02. rbac_user4 has project-admin in project3 and project4.
+
+        logger.debug('Verify rbac_user4 has access to project 3 & project4')
+        project_access(
+            'rbac_user4', 'project4', session_class, confd_host, logger)
+        project_access(
+            'rbac_user4', 'project3', session_class, confd_host, logger)
+
+        logger.debug('Two users in project3 exchanges roles & check access')
+        rift.auto.mano.revoke_project_role_from_user(
+            rw_project_proxy, role1, 'rbac_user4',
+            'project3', user_domain)
+        rift.auto.mano.revoke_project_role_from_user(
+            rw_project_proxy, role2, 'rbac_user5',
+            'project3', user_domain)
+        rift.auto.mano.assign_project_role_to_user(
+            rw_project_proxy, role2, 'rbac_user4',
+            'project3', user_domain, rw_rbac_int_proxy)
+        rift.auto.mano.assign_project_role_to_user(
+            rw_project_proxy, role1, 'rbac_user5',
+            'project3', user_domain, rw_rbac_int_proxy)
+
+        logger.debug('rbac_user5 trying its access on project3 and project4')
+        project_access(
+            'rbac_user5', 'project3', session_class,
+            confd_host, logger
+        )
+        with pytest.raises(
+                Exception,
+                message="rbac_user5 accessed project4 which its not part of."):
+            project_access(
+                'rbac_user5', 'project4', session_class,
+                confd_host, logger
+            )
+
+        # 'rbac_user5'(admin role) revoking the role from rbac-user4
+        password = rbac_user_passwd
+        if tbac:
+            password = 'rbac_user5'
+        rbac_user2_session = rift.auto.mano.get_session(
+            session_class, confd_host, 'rbac_user5', password)
+        rbac_user2_prjt_pxy = rbac_user2_session.proxy(RwProjectYang)
+        self.delete_user_from_project(
+            rbac_user2_prjt_pxy, 'rbac_user4', 'project3', user_domain)
+
+        # Create new user 'del-user'
+        rift.auto.mano.create_user(
+            rw_user_proxy, 'del-user', rbac_user_passwd, user_domain)
+        rift.auto.mano.assign_project_role_to_user(
+            rw_project_proxy, role2, 'del-user', 'project3',
+            user_domain, rw_rbac_int_proxy)
+        # Delete 'del-user' with 'rbac_user5'(admin role)
+        self.delete_user_from_project(
+            rbac_user2_prjt_pxy, 'del-user', 'project3', user_domain)
+
+        logger.debug(
+            'rbac_user4 try to access project3 which its not a part of anymore'
+        )
+        with pytest.raises(
+                Exception,
+                message="rbac_user4 accessed project3 which its not part of."):
+            project_access(
+                'rbac_user4', 'project3', session_class,
+                confd_host, logger)
+
+        logger.debug('rbac_user4 try to access project4 which its a part of.')
+        project_access(
+            'rbac_user4', 'project4', session_class,
+            confd_host, logger)
+
+        # Case 03. rbac_user9 has project-oper in project5 and project6.
+
+        logger.debug('rbac_user9 try to access project5 & project6')
+        project_access(
+            'rbac_user9', 'project5', session_class,
+            confd_host, logger)
+        project_access(
+            'rbac_user9', 'project6', session_class,
+            confd_host, logger)
+
+        logger.debug(
+            'rbac_user8 try to access to project5 which its not part of.'
+        )
+        with pytest.raises(
+                Exception,
+                message="rbac_user8 accessed project5 which its not part of."):
+            project_access(
+                'rbac_user8', 'project5', session_class,
+                confd_host, logger)
+
+        logger.debug(
+            'rbac_user7 try to access to project6 which its not part of.'
+        )
+        with pytest.raises(
+                Exception,
+                message="rbac_user7 accessed project6 which its not part of."):
+            project_access(
+                'rbac_user7', 'project6', session_class,
+                confd_host, logger)
+
+
+    @pytest.mark.skipif(not pytest.config.getoption("--user-creation-test"), reason="need --user-creation-test option to run")
+    def test_user_create_delete_authorization(self, logger, users_test_data, session_class, confd_host, rw_user_proxy,
+                        rbac_user_passwd, user_domain, tbac, rw_rbac_int_proxy):
+        """Verifies only users with certain roles can create/delete users and set the password of an user"""
+        write_users, read_users, fail_users = users_test_data
+
+        # Create a dummy user with admin/admin
+        dummy_user_name = 'dummy-user'
+        rift.auto.mano.create_user(rw_user_proxy, dummy_user_name, rbac_user_passwd, user_domain)
+
+        # Check users in write_users dict able to create/delete an user and able to set password for others
+        logger.debug('Verifying users which are authorised to create/delete an user')
+        for user in write_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, write_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, write_users[user][1])
+            pxy = user_session.proxy(RwUserYang)
+
+            user_name = 'new-user-{}'.format(user)
+            logger.debug('Trying to create user {}'.format(user_name))
+            rift.auto.mano.create_user(pxy, user_name, rbac_user_passwd, user_domain)
+
+            logger.debug('Trying to delete user {}'.format(user_name))
+            rift.auto.mano.delete_user(pxy, user_name, user_domain)
+
+            if not tbac:    # password update is not allowed for external users in tbac
+                new_passwd = rift.auto.mano.generate_password()
+                # Check users in write_users dict able to set password for other user (dummy-user)
+                logger.debug('User {} trying to update password for user {}'.format(user, dummy_user_name))
+                rift.auto.mano.update_password(pxy, dummy_user_name, new_passwd, user_domain, rw_rbac_int_proxy)
+
+                # Verify dummy_user_name able to log-in with its new password
+                logger.debug('User {} trying to log-in with its updated password {}'.format(dummy_user_name, new_passwd))
+                dummy_user_session_updated_passwd = rift.auto.mano.get_session(session_class, confd_host, dummy_user_name,
+                                                                new_passwd)
+
+                # Verify the user not able to log-in with old password
+                with pytest.raises(Exception, message='User {} not supposed to log-in with its old password {}'.format(
+                                                                dummy_user_name, rbac_user_passwd)) as excinfo:
+                    logger.debug('User {} trying to connect with its old password {}'.format(user, rbac_user_passwd))
+                    rift.auto.mano.get_session(session_class, confd_host, dummy_user_name, rbac_user_passwd,
+                                        timeout=SESSION_CONNECT_TIMEOUT)
+
+                rift.auto.mano.close_session(dummy_user_session_updated_passwd)
+            rift.auto.mano.close_session(user_session)
+
+        # Check users in read_users dict able to read user list (path: /user-config)
+        logger.debug('Verifying users which are authorised to read user list')
+        for user in read_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, read_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, read_users[user][1])
+            pxy = user_session.proxy(RwUserYang)
+            logger.debug('User {} trying to access /user-config xpath'.format(user))
+            user_config = pxy.get_config('/user-config')
+            assert [user.user_name for user in user_config.user]
+
+            rift.auto.mano.close_session(user_session)
+
+        # Check users in fail_users dict not able to create/delete an user
+        logger.debug('Verifying users which are not supposed to create/delete an user')
+        for user in fail_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+            pxy = user_session.proxy(RwUserYang)
+
+            user_name = 'new-user-{}'.format(user)
+
+            with pytest.raises(Exception, message='User {} not authorised to create user {}'.format(
+                                                    user, user_name)) as excinfo:
+                logger.debug('User {} trying to create an user {}'.format(user, user_name))
+                rift.auto.mano.create_user(pxy, user_name, rbac_user_passwd, user_domain)
+
+            with pytest.raises(Exception, message='User {} not authorised to delete user {}'.format(
+                                                    user, dummy_user_name)) as excinfo:
+                logger.debug('User {} trying to delete user {}'.format(user, dummy_user_name))
+                rift.auto.mano.delete_user(pxy, dummy_user_name, user_domain)
+
+            rift.auto.mano.close_session(user_session)
+
+        if not tbac:    # password update is not allowed for external users in tbac
+            # Check all users able to set their own password
+            logger.debug('Verifying an user able to set its own password')
+            for user, role_passwd_tuple in dict(write_users, **dict(read_users, **fail_users)).items():
+                logger.debug('Verifying user:(role,password) {}:{}'.format(user, role_passwd_tuple))
+                user_session = rift.auto.mano.get_session(session_class, confd_host, user, role_passwd_tuple[1])
+                pxy = user_session.proxy(RwUserYang)
+
+                new_passwd = rift.auto.mano.generate_password()
+                logger.debug('User {} trying to update its password to {}'.format(user, new_passwd))
+                rift.auto.mano.update_password(pxy, user, new_passwd, user_domain, rw_rbac_int_proxy)
+
+                # Verify the user should be able to log-in with new password
+                logger.debug('User {} trying to log-in with its updated password {}'.format(user, new_passwd))
+                user_session_updated_passwd = rift.auto.mano.get_session(session_class, confd_host, user, new_passwd)
+
+                # Verify the user not able to log-in with old password
+                with pytest.raises(Exception, message='User {} not supposed to log-in with its old password {}'.format(
+                                                                        user, role_passwd_tuple[1])) as excinfo:
+                    logger.debug('User {} trying to connect with its old password {}'.format(user, role_passwd_tuple[1]))
+                    rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd, timeout=SESSION_CONNECT_TIMEOUT)
+
+                rift.auto.mano.close_session(user_session)
+                rift.auto.mano.close_session(user_session_updated_passwd)
+
+
+    @pytest.mark.skipif(not pytest.config.getoption("--account-test"), reason="need --account-test option to run")
+    def test_account_create_delete_authorization(self, users_test_data, mgmt_session, logger, cloud_module, fmt_cloud_xpath,
+                            fmt_prefixed_cloud_xpath, project_acessible, cloud_account, session_class, confd_host):
+        """Verifies only users with certain roles can create/read/delete cloud, sdn accounts"""
+        write_users, read_users, fail_users = users_test_data
+        xpath_no_pfx = fmt_cloud_xpath.format(project=quoted_key(project_acessible), account_name=quoted_key(cloud_account.name))
+        xpath = fmt_prefixed_cloud_xpath.format(project=quoted_key(project_acessible), account_name=quoted_key(cloud_account.name))
+
+        # Check users in write_users dict able to create/delete cloud accounts
+        logger.debug('Verifying users which are authorised to create/delete cloud accounts')
+        for user in write_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, write_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, write_users[user][1])
+            cloud_pxy = user_session.proxy(cloud_module)
+
+            logger.debug('Trying to create a cloud account')
+            cloud_pxy.replace_config(xpath, cloud_account)
+            response =  cloud_pxy.get(xpath_no_pfx)
+            assert response.name == cloud_account.name
+            assert response.account_type == cloud_account.account_type
+
+            logger.debug('Trying to delete the cloud account')
+            cloud_pxy.delete_config(xpath)
+            assert cloud_pxy.get(xpath_no_pfx) is None
+
+            rift.auto.mano.close_session(user_session)
+
+        # admin user creating a cloud account which read_users will be trying to read
+        logger.debug('admin user creating cloud account {}'.format(cloud_account.name))
+        admin_cloud_proxy = mgmt_session.proxy(cloud_module)
+        admin_cloud_proxy.replace_config(xpath, cloud_account)
+        assert admin_cloud_proxy.get(xpath_no_pfx).name == cloud_account.name
+
+        # Check users in read_users dict able to read cloud accounts
+        logger.debug('Verifying users which are authorised to read cloud accounts')
+        for user in read_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, read_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, read_users[user][1])
+            cloud_pxy = user_session.proxy(cloud_module)
+
+            response =  cloud_pxy.get(xpath_no_pfx)
+            assert response.name == cloud_account.name
+            assert response.account_type == cloud_account.account_type
+
+            rift.auto.mano.close_session(user_session)
+
+        # Check users in fail_users dict not able to delete/read cloud accounts
+        logger.debug('Verifying users which are not authorised to read/delete cloud accounts')
+        for user in fail_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+            cloud_pxy = user_session.proxy(cloud_module)
+
+            with pytest.raises(Exception, message='User {} not authorised to delete cloud account {}'.format(
+                                                user, cloud_account.name)) as excinfo:
+                logger.debug('User {} trying to delete cloud account {}'.format(user, cloud_account.name))
+                cloud_pxy.delete_config(xpath)
+
+            # logger.debug('User {} trying to access cloud account {}'.format(user, cloud_account.name))
+            # assert cloud_pxy.get(xpath_no_pfx) is None
+            rift.auto.mano.close_session(user_session)
+
+        # admin user deleting the cloud account
+        logger.debug('admin user deleting cloud account {}'.format(cloud_account.name))
+        admin_cloud_proxy.delete_config(xpath)
+        assert admin_cloud_proxy.get(xpath_no_pfx) is None
+
+        # Check users in fail_users dict not able to create cloud accounts
+        logger.debug('Verifying users which are not authorised to create cloud accounts')
+        for user in fail_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+            cloud_pxy = user_session.proxy(cloud_module)
+
+            with pytest.raises(Exception, message='User {} not authorised to create cloud account {}'.format(
+                                                user, cloud_account.name)) as excinfo:
+                logger.debug('User {} trying to create a cloud account {}'.format(user, cloud_account.name))
+                cloud_pxy.replace_config(xpath, cloud_account)
+
+            rift.auto.mano.close_session(user_session)
+
+    @staticmethod
+    def delete_descriptors(project, vnfd_proxy, nsd_proxy, vnfd_xpath, nsd_xpath, fmt_vnfd_id_xpath, fmt_nsd_id_xpath):
+        nsds = nsd_proxy.get('{}/nsd'.format(nsd_xpath), list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = fmt_nsd_id_xpath.format(project=quoted_key(project), nsd_id=quoted_key(nsd.id))
+            nsd_proxy.delete_config(xpath)
+        nsds = nsd_proxy.get('{}/nsd'.format(nsd_xpath), list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get('{}/vnfd'.format(vnfd_xpath), list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = fmt_vnfd_id_xpath.format(project=quoted_key(project), vnfd_id=quoted_key(vnfd_record.id))
+            vnfd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get('{}/vnfd'.format(vnfd_xpath), list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
+
+    @pytest.mark.skipif(not pytest.config.getoption("--onboarding-test"), reason="need --onboarding-test option to run")
+    def test_onboarding_authorization(self, users_test_data, logger, descriptors, session_class, confd_host,
+            fmt_vnfd_catalog_xpath, fmt_nsd_catalog_xpath, fmt_nsd_id_xpath, fmt_vnfd_id_xpath, project_acessible, mgmt_session):
+        """Verifies only users with certain roles can onboard/update/delete a package"""
+
+        descriptor_vnfds, descriptor_nsd = descriptors[:-1], descriptors[-1]
+        write_users, read_users, fail_users = users_test_data
+        logger.debug('The descriptrs being used: {}'.format(descriptors))
+        nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(project_acessible))
+        vnfd_xpath = fmt_vnfd_catalog_xpath.format(project=quoted_key(project_acessible))
+
+        def onboard(user_session, project):
+            for descriptor in descriptors:
+                rift.auto.descriptor.onboard(user_session, descriptor, project=project)
+
+        def verify_descriptors(vnfd_pxy, nsd_pxy, vnfd_count, nsd_count):
+            catalog = vnfd_pxy.get_config(vnfd_xpath)
+            actual_vnfds = catalog.vnfd
+            assert len(actual_vnfds) == vnfd_count, 'There should be {} vnfds'.format(vnfd_count)
+            catalog = nsd_pxy.get_config(nsd_xpath)
+            actual_nsds = catalog.nsd
+            assert len(actual_nsds) == nsd_count, 'There should be {} nsd'.format(nsd_count)
+
+        # Check users in write_users dict able to onboard/delete descriptors
+        logger.debug('Verifying users which are authorised to onboard/delete descriptors')
+        for user in write_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, write_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, write_users[user][1])
+            vnfd_pxy = user_session.proxy(RwProjectVnfdYang)
+            nsd_pxy = user_session.proxy(RwProjectNsdYang)
+            logger.debug('Trying to onboard ping-pong descriptors')
+            onboard(user_session, project_acessible)
+            logger.debug('Verifying if the descriptors are uploaded')
+            verify_descriptors(vnfd_pxy, nsd_pxy, len(descriptor_vnfds), 1)
+
+            logger.debug('Trying to delete descriptors')
+            TestRbacVerification.delete_descriptors(project_acessible, vnfd_pxy, nsd_pxy, vnfd_xpath, nsd_xpath,
+                                                    fmt_vnfd_id_xpath, fmt_nsd_id_xpath)
+
+            rift.auto.mano.close_session(user_session)
+
+        # onboard the descriptors using mgmt_session which read_users will try to read
+        logger.debug('admin user uploading the descriptors which read_users will try to read')
+        onboard(mgmt_session, project_acessible)
+        admin_vnfd_pxy = mgmt_session.proxy(RwProjectVnfdYang)
+        admin_nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+        logger.debug('Verifying if the descriptors are uploaded')
+        verify_descriptors(admin_vnfd_pxy, admin_nsd_pxy, len(descriptor_vnfds), 1)
+
+        # Check users in read_users dict able to read already onboarded descriptors
+        logger.debug('Verifying users which are authorised to read descriptors')
+        for user in read_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, read_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, read_users[user][1])
+            vnfd_pxy = user_session.proxy(RwProjectVnfdYang)
+            nsd_pxy = user_session.proxy(RwProjectNsdYang)
+
+            logger.debug('Trying to read ping-pong descriptors')
+            verify_descriptors(vnfd_pxy, nsd_pxy, len(descriptor_vnfds), 1)
+
+            rift.auto.mano.close_session(user_session)
+
+        # Check users in fail_users dict not able to onboard/delete descriptors
+        logger.debug('Verifying users which are not supposed to delete descriptors')
+        for user in fail_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+            vnfd_pxy = user_session.proxy(RwProjectVnfdYang)
+            nsd_pxy = user_session.proxy(RwProjectNsdYang)
+
+            with pytest.raises(Exception, message='User {} not authorised to delete descriptors'.format(user)) as excinfo:
+                logger.debug('User {} trying to delete descriptors'.format(user))
+                TestRbacVerification.delete_descriptors(project_acessible, vnfd_pxy, nsd_pxy, vnfd_xpath, nsd_xpath,
+                                                        fmt_vnfd_id_xpath, fmt_nsd_id_xpath)
+
+            rift.auto.mano.close_session(user_session)
+
+        logger.debug('Deleting the descriptors as fail_users trying to upload the descriptors')
+        TestRbacVerification.delete_descriptors(project_acessible, admin_vnfd_pxy, admin_nsd_pxy, vnfd_xpath, nsd_xpath,
+                                                fmt_vnfd_id_xpath, fmt_nsd_id_xpath)
+
+        logger.debug('Verifying users which are not supposed to create descriptors')
+        for user in fail_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+            vnfd_pxy = user_session.proxy(RwProjectVnfdYang)
+            nsd_pxy = user_session.proxy(RwProjectNsdYang)
+
+            with pytest.raises(Exception, message='User {} not authorised to onboard descriptors'.format(user)) as excinfo:
+                logger.debug('User {} trying to onboard ping-pong descriptors'.format(user))
+                onboard(user_session)
+
+            rift.auto.mano.close_session(user_session)
+
+    @pytest.mark.skipif(not pytest.config.getoption("--nsr-test"),
+                        reason="need --nsr-test option to run")
+    def test_nsr_authorization(self, users_test_data, logger, cloud_account,
+                               cloud_module, descriptors, session_class,
+                               confd_host, fmt_cloud_xpath,
+                               fmt_prefixed_cloud_xpath, mgmt_session, fmt_nsd_id_xpath, fmt_vnfd_id_xpath,
+                               project_acessible, fmt_nsd_catalog_xpath, fmt_vnfd_catalog_xpath):
+        """Verifies only users with certain roles can
+        create/read/delete nsr/vlr/vnfr
+        """
+
+        descriptor_vnfds, descriptor_nsd = descriptors[:-1], descriptors[-1]
+        write_users, read_users, fail_users = users_test_data
+
+        # Cloud account creation
+        logger.debug('Creating a cloud account which will be used for NS instantiation')
+        cloud_pxy = mgmt_session.proxy(cloud_module)
+        cloud_pxy.replace_config(fmt_prefixed_cloud_xpath.format(project=quoted_key(project_acessible),
+                                                                 account_name=quoted_key(cloud_account.name)),
+                                 cloud_account)
+        response = cloud_pxy.get(
+            fmt_cloud_xpath.format(project=quoted_key(project_acessible), account_name=quoted_key(cloud_account.name)))
+        assert response.name == cloud_account.name
+
+        cloud_pxy.wait_for(fmt_cloud_xpath.format(project=quoted_key(project_acessible), account_name=quoted_key(
+            cloud_account.name)) + '/connection-status/status', 'success', timeout=30, fail_on=['failure'])
+
+        # Upload the descriptors
+        nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(project_acessible))
+        vnfd_xpath = fmt_vnfd_catalog_xpath.format(project=quoted_key(project_acessible))
+        logger.debug('Uploading descriptors {} which will be used for NS instantiation'.format(descriptors))
+        for descriptor in descriptors:
+            rift.auto.descriptor.onboard(mgmt_session, descriptor, project=project_acessible)
+        admin_nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+        nsd_catalog = admin_nsd_pxy.get_config(nsd_xpath)
+        assert nsd_catalog
+        nsd = nsd_catalog.nsd[0]
+        nsr = rift.auto.descriptor.create_nsr(cloud_account.name, nsd.name, nsd)
+
+        # Check users in write_users dict able to instantiate/delete a NS
+        logger.debug('Verifying users which are authorised to instantiate/delete a NS')
+        for user in write_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, write_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, write_users[user][1])
+            rwnsr_pxy = user_session.proxy(RwNsrYang)
+            rwvnfr_pxy = user_session.proxy(RwVnfrYang)
+            rwvlr_pxy = user_session.proxy(RwVlrYang)
+
+            logger.info("Trying to instantiate the Network Service")
+            rift.auto.descriptor.instantiate_nsr(nsr, rwnsr_pxy, logger,
+                                                 project=project_acessible)
+
+            logger.info("Trying to terminate the Network Service")
+            rift.auto.descriptor.terminate_nsr(rwvnfr_pxy, rwnsr_pxy,
+                                               rwvlr_pxy, logger,
+                                               project_acessible)
+
+        # Instantiate a NS which the read_users, fail_users will try to
+        # read/delete.
+        admin_rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+        admin_rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
+        admin_rwvlr_pxy = mgmt_session.proxy(RwVlrYang)
+        logger.debug('admin user instantiating NS which the read_users, fail_users will try to read/delete.')
+        rift.auto.descriptor.instantiate_nsr(nsr, admin_rwnsr_pxy, logger, project=project_acessible)
+
+        # Check users in read_users, write_users dict able to read vnfr-console, vnfr-catalog, ns-instance-opdata
+        p_xpath = '/project[name={}]'.format(quoted_key(project_acessible))
+        read_xpaths = ['/ns-instance-opdata', '/vnfr-catalog', '/vnfr-console']
+        logger.debug('Verifying users which are authorised to read vnfr-catalog, ns-instance-opdata, vnfr-console etc')
+        for user, role_passwd_tuple in dict(write_users, **read_users).items():
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, role_passwd_tuple))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, role_passwd_tuple[1])
+            rwnsr_pxy = user_session.proxy(RwNsrYang)
+            rwvnfr_pxy = user_session.proxy(RwVnfrYang)
+            for xpath in read_xpaths:
+                logger.debug('Trying to read xpath: {}'.format(p_xpath+xpath))
+                proxy_ = rwvnfr_pxy if 'vnfr' in xpath else rwnsr_pxy
+                assert proxy_.get(p_xpath+xpath)
+
+            rift.auto.mano.close_session(user_session)
+
+        # Check users in fail_users dict not able to terminate a NS
+        logger.debug('Verifying users which are NOT authorised to terminate a NS')
+        for user in fail_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+            rwnsr_pxy = user_session.proxy(RwNsrYang)
+            rwvnfr_pxy = user_session.proxy(RwVnfrYang)
+
+            with pytest.raises(Exception, message='User {} not authorised to terminate NS'.format(user)) as excinfo:
+                logger.debug('User {} trying to delete NS'.format(user))
+                rift.auto.descriptor.terminate_nsr(rwvnfr_pxy, rwnsr_pxy,
+                                                   logger, admin_rwvlr_pxy,
+                                                   project=project_acessible)
+            rift.auto.mano.close_session(user_session)
+
+        # Terminate the NS instantiated by admin user
+        logger.debug('admin user terminating the NS')
+        rift.auto.descriptor.terminate_nsr(admin_rwvnfr_pxy,
+                                           admin_rwnsr_pxy,
+                                           admin_rwvlr_pxy, logger,
+                                           project=project_acessible)
+
+        # Check users in fail_users dict not able to instantiate a NS
+        nsr.id = str(uuid.uuid4())
+        logger.debug('Verifying users which are NOT authorised to instantiate a NS')
+        for user in fail_users:
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+            rwnsr_pxy = user_session.proxy(RwNsrYang)
+            rwvnfr_pxy = user_session.proxy(RwVnfrYang)
+
+            with pytest.raises(Exception, message='User {} not authorised to instantiate NS'.format(user)) as excinfo:
+                logger.debug('User {} trying to instantiate NS'.format(user))
+                rift.auto.descriptor.instantiate_nsr(nsr, rwnsr_pxy, logger, project=project_acessible)
+            rift.auto.mano.close_session(user_session)
+
+        # delete cloud accounts and descriptors; else deleting project in teardown fails
+        cloud_pxy.delete_config(fmt_prefixed_cloud_xpath.format(project=quoted_key(project_acessible), 
+                        account_name=quoted_key(cloud_account.name)))
+        admin_vnfd_pxy = mgmt_session.proxy(RwProjectVnfdYang)
+        TestRbacVerification.delete_descriptors(project_acessible, admin_vnfd_pxy, admin_nsd_pxy, vnfd_xpath, nsd_xpath,
+                                                fmt_vnfd_id_xpath, fmt_nsd_id_xpath)
+
+    @pytest.mark.skipif(not pytest.config.getoption("--syslog-server-test"), reason="need --syslog-server-test option to run")
+    def test_set_syslog_server_authorization(self, mgmt_session, users_test_data, session_class, confd_host, logger):
+        """Verifies only users with certain roles can set syslog server"""
+        write_users, read_users, fail_users = users_test_data
+        admin_log_mgmt_pxy = mgmt_session.proxy(RwlogMgmtYang)
+
+        def update_syslog_server_address(user_log_mgmt_pxy):
+            ip = '127.0.0.{}'.format(random.randint(0,255))
+            sink_obj = RwlogMgmtYang.Logging_Sink.from_dict({'server_address': ip})
+
+            syslog_name = admin_log_mgmt_pxy.get_config('/logging').sink[0].name
+            logger.debug('updating the syslog {} server_address to {}'.format(syslog_name, ip))
+            user_log_mgmt_pxy.merge_config('/logging/sink[name={sink_name}]'.format(sink_name=quoted_key(syslog_name)), sink_obj)
+            assert [sink.server_address for sink in admin_log_mgmt_pxy.get_config('/logging').sink if sink.name == syslog_name][0] == ip
+
+        for user, role_passwd_tuple in dict(write_users, **dict(read_users, **fail_users)).items():
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, role_passwd_tuple))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, role_passwd_tuple[1])
+            user_log_mgmt_pxy = user_session.proxy(RwlogMgmtYang)
+
+            if user in write_users:
+                logger.debug('User {} should be able to update the syslog server address'.format(user))
+                update_syslog_server_address(user_log_mgmt_pxy)
+
+            if user in fail_users:
+                with pytest.raises(Exception, message='User {} not authorised to set syslog server address'.format(user)) as excinfo:
+                    logger.debug('User {} trying to update the syslog server address. It should fail'.format(user))
+                    update_syslog_server_address(user_log_mgmt_pxy)
+
+            if user in read_users:
+                logger.debug('User {} trying to read the syslog server address'.format(user))
+                logging_obj = user_log_mgmt_pxy.get_config('/logging')
+                assert logging_obj.sink[0]
+                assert logging_obj.sink[0].server_address
+
+    @pytest.mark.skipif(not pytest.config.getoption("--redundancy-role-test"), reason="need --redundancy-role-test option to run")
+    def test_redundancy_config_authorization(self, mgmt_session, users_test_data, session_class, confd_host, logger, redundancy_config_test_roles):
+        """Verifies only users with certain roles can set redundancy-config or read redundancy-state"""
+        write_users, read_users, fail_users = users_test_data
+        admin_redundancy_pxy = mgmt_session.proxy(RwRedundancyYang)
+        site_nm_pfx = 'ha_site_'
+
+        def create_redundancy_site(user_redundancy_pxy, site_nm):
+            site_id = '127.0.0.1'
+            site_obj = RwRedundancyYang.YangData_RwRedundancy_RedundancyConfig_Site.from_dict({'site_name': site_nm, 'site_id': site_id})
+
+            logger.debug('Creating redundancy site {}'.format(site_nm))
+            user_redundancy_pxy.create_config('/rw-redundancy:redundancy-config/rw-redundancy:site', site_obj)
+            assert [site.site_name for site in admin_redundancy_pxy.get_config('/redundancy-config/site', list_obj=True).site if site.site_name == site_nm]
+
+        def delete_redundancy_site(user_redundancy_pxy, site_nm):
+            logger.debug('Deleting redundancy site {}'.format(site_nm))
+            user_redundancy_pxy.delete_config('/rw-redundancy:redundancy-config/rw-redundancy:site[rw-redundancy:site-name={}]'.format(quoted_key(site_nm)))
+            assert not [site.site_name for site in admin_redundancy_pxy.get_config('/redundancy-config/site', list_obj=True).site if site.site_name == site_nm]
+
+        # Create a redundancy site which fail user will try to delete/ read user will try to read
+        create_redundancy_site(admin_redundancy_pxy, 'test_site')
+
+        for user, role_passwd_tuple in dict(write_users, **dict(read_users, **fail_users)).items():
+            logger.debug('Verifying user:(role,password) {}:{}'.format(user, role_passwd_tuple))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, role_passwd_tuple[1])
+            user_redundancy_pxy = user_session.proxy(RwRedundancyYang)
+            
+            if user in write_users:
+                site_nm = '{}_{}'.format(site_nm_pfx, user)
+                logger.debug('User {} should be able to create a new redundancy site {}'.format(user, site_nm))
+                create_redundancy_site(user_redundancy_pxy, site_nm)
+
+                logger.debug('User {} should be able to delete a redundancy site {}'.format(user, site_nm))
+                delete_redundancy_site(user_redundancy_pxy, site_nm)
+                
+                assert user_redundancy_pxy.get('/redundancy-state')
+
+            if user in fail_users:
+                site_nm = '{}_{}'.format(site_nm_pfx, user)
+                with pytest.raises(Exception, message='User {} not authorised to create redundancy site'.format(user)) as excinfo:
+                    logger.debug('User {} trying to create redundancy site {}. It should fail'.format(user, site_nm))
+                    create_redundancy_site(user_redundancy_pxy, site_nm)
+
+                with pytest.raises(Exception, message='User {} not authorised to delete redundancy site'.format(user)) as excinfo:
+                    logger.debug('User {} trying to delete redundancy site {}. It should fail'.format(user, site_nm))
+                    delete_redundancy_site(user_redundancy_pxy, 'test_site')
+
+            if user in read_users:
+                logger.debug('User {} trying to read redundancy-config'.format(user))
+                assert user_redundancy_pxy.get('/redundancy-state')
+                assert user_redundancy_pxy.get('/redundancy-config')
+
+
+@pytest.mark.depends('test_rbac_roles_setup')
+@pytest.mark.teardown('test_rbac_roles_setup')
+@pytest.mark.incremental
+class TestRbacTeardown(object):
+    def test_delete_project(self, rw_project_proxy, logger, project_keyed_xpath, project_acessible):
+        """Deletes projects used for the test"""
+        if rw_project_proxy.get_config(project_keyed_xpath.format(project_name=quoted_key(project_acessible))+'/project-state', list_obj=True):
+            logger.debug('Deleting project {}'.format(project_acessible))
+            rift.auto.mano.delete_project(rw_project_proxy, project_acessible)
+
+    def test_delete_users(self, users_test_data, logger, rw_user_proxy, rbac_platform_proxy, platform_config_keyed_xpath,
+                                    user_keyed_xpath, user_domain, rw_conman_proxy, project_acessible):
+        """Deletes the users which are part of rbac test-data and verify their deletion"""
+        write_users, read_users, fail_users = users_test_data
+
+        for user, role_passwd_tuple in dict(write_users, **dict(read_users, **fail_users)).items():
+            logger.debug('Deleting user:(role,password) {}:{}'.format(user, role_passwd_tuple))
+            if any('platform' in role for role in role_passwd_tuple[0]):
+                rbac_platform_proxy.delete_config(platform_config_keyed_xpath.format(user=quoted_key(user), domain=quoted_key(user_domain)))
+            rw_user_proxy.delete_config(user_keyed_xpath.format(user=quoted_key(user), domain=quoted_key(user_domain)))
+
+            # Verify if the user is deleted
+            user_config = rw_user_proxy.get_config('/user-config')
+            current_users_list = [user.user_name for user in user_config.user]
+
+            assert user not in current_users_list
+
+        # Verify only two users should be present now: oper & admin
+        user_config = rw_user_proxy.get_config('/user-config')
+        current_users_list = [user.user_name for user in user_config.user]
+
+        logger.debug('Current users list after deleting all test users: {}'.format(current_users_list))
+        expected_empty_user_list = [user for user in users_test_data if user in current_users_list]
+        assert not expected_empty_user_list
diff --git a/rwlaunchpad/ra/pytest/ns/rbac/test_rbac_usages.py b/rwlaunchpad/ra/pytest/ns/rbac/test_rbac_usages.py
new file mode 100644
index 0000000..cff1c9c
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/rbac/test_rbac_usages.py
@@ -0,0 +1,549 @@
+#!/usr/bin/env python3
+"""
+#
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+"""
+
+import gi
+import pytest
+import time
+import random
+import rift.auto.mano
+import rift.auto.descriptor
+
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwRbacInternalYang', '1.0')
+gi.require_version('RwRbacPlatformYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwUserYang', '1.0')
+gi.require_version('RwOpenidcProviderYang', '1.0')
+from gi.repository import (
+    RwConmanYang,
+    RwProjectVnfdYang,
+    RwProjectNsdYang,
+    RwNsrYang,
+    RwVnfrYang,
+    RwVlrYang,
+    RwRbacInternalYang,
+    RwRbacPlatformYang,
+    RwProjectYang,
+    RwUserYang,
+    RwOpenidcProviderYang,
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+@pytest.fixture(scope='session')
+def complex_scaling_factor():
+    return 10
+
+@pytest.mark.incremental
+class TestRbacSetup(object):
+    def test_onboarded_vnfds_project_independent(self, descriptors, logger, rbac_platform_proxy, rw_conman_proxy, rw_user_proxy,
+        rw_project_proxy, rbac_user_passwd, user_domain, fmt_vnfd_catalog_xpath, session_class, confd_host, fmt_vnfd_id_xpath, rw_rbac_int_proxy):
+        """Same VNFDs on boarded in two different projects. VNFD changes in one project shouldn't affect another."""
+        map_project_user_roles = {
+                                    'user1': ('project_test_onboarded_vnfds_project_independent_1', 'rw-project-mano:catalog-admin'),
+                                    'user2': ('project_test_onboarded_vnfds_project_independent_2', 'rw-project:project-admin'),
+                                    }
+        user_to_modify_vnfds, user_not_supposed_to_see_vnfd_changes = 'user1', 'user2'
+
+        modified_vnfd_name = 'test_rbac_vnfd'
+        user_sessions = {}
+        logger.debug('descriptors being used: {}'.format(descriptors))
+
+        for user, project_role_tuple in map_project_user_roles.items():
+            project_name, role = project_role_tuple
+            logger.debug('Creating user {} with {}'.format(user, project_role_tuple))
+
+            rift.auto.mano.create_project(rw_conman_proxy, project_name)
+            rift.auto.mano.create_user(rw_user_proxy, user, rbac_user_passwd, user_domain)
+            if 'platform' in role:
+                rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, role, user, user_domain, rw_rbac_int_proxy)
+            else:
+                rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, user,
+                                project_name, user_domain, rw_rbac_int_proxy)
+
+            logger.debug('User {} onboarding the packages'.format(user))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd)
+            user_sessions[user] = user_session
+            for descriptor in descriptors:
+                rift.auto.descriptor.onboard(user_session, descriptor, project=project_name)
+
+        vnfd_pxy = user_sessions[user_to_modify_vnfds].proxy(RwProjectVnfdYang)
+        vnfd_xpath = '{}/vnfd'.format(fmt_vnfd_catalog_xpath.format(project=quoted_key(map_project_user_roles[user_to_modify_vnfds][0])))
+        for vnfd in vnfd_pxy.get(vnfd_xpath, list_obj=True).vnfd:
+            logger.debug('Changing the vnfd name from {} to {} for user {}'.format(vnfd.name, modified_vnfd_name, user_to_modify_vnfds))
+            vnfd.name = modified_vnfd_name
+            vnfd_pxy.replace_config(fmt_vnfd_id_xpath.format(
+                project=quoted_key(map_project_user_roles[user_to_modify_vnfds][0]), vnfd_id=quoted_key(vnfd.id)), vnfd)
+
+        for vnfd in vnfd_pxy.get(vnfd_xpath, list_obj=True).vnfd:
+            assert vnfd.name == modified_vnfd_name
+
+        vnfd_pxy = user_sessions[user_not_supposed_to_see_vnfd_changes].proxy(RwProjectVnfdYang)
+        vnfd_xpath = '{}/vnfd'.format(fmt_vnfd_catalog_xpath.format(project=quoted_key(map_project_user_roles[user_not_supposed_to_see_vnfd_changes][0])))
+        for vnfd in vnfd_pxy.get(vnfd_xpath, list_obj=True).vnfd:
+            logger.debug('Verifying the vnfd name {} for user {} did not change to {}'.format(
+                vnfd.name, user_not_supposed_to_see_vnfd_changes, modified_vnfd_name))
+            assert vnfd.name != modified_vnfd_name
+
+    def test_multi_projects_multi_vnf(
+            self, rw_project_proxy, rw_conman_proxy, cloud_account,
+            cloud_module, descriptors, session_class,
+            confd_host, user_domain, mgmt_session, fmt_nsd_catalog_xpath,
+            logger, rw_rbac_int_proxy):
+        """Creates multiple projects, cloud accounts and then
+        instantiates them. Then it lets the instantiated NS's run for a minute
+        after which gets terminated. Use the SCALE_FACTOR to adjust the number
+        of instantiations."""
+
+        def instantiate_nsr_not_wait(nsr, rwnsr_proxy, project='default'):
+            ns_instance_opdata_xpath = '/project[name={}]/ns-instance-opdata'.format(quoted_key(project))
+            rwnsr_proxy.create_config('/rw-project:project[rw-project:name={}]/nsr:ns-instance-config/nsr:nsr'.format(quoted_key(project)), nsr)
+            nsr_opdata = rwnsr_proxy.get('{}/nsr[ns-instance-config-ref={}]'.format(ns_instance_opdata_xpath, quoted_key(nsr.id)))
+            assert nsr_opdata is not None
+
+            nsr_opdata = rwnsr_proxy.get(ns_instance_opdata_xpath)
+            nsr_ = [nsr_ for nsr_ in nsr_opdata.nsr if nsr_.ns_instance_config_ref==nsr.id][0]
+
+        #Creating multiple projects according to the scale factor
+        SCALE_FACTOR = 5
+        PROJECT_LIST = {}
+        for idx in range(1,SCALE_FACTOR+1):
+            rift.auto.mano.create_project(rw_conman_proxy, 'cloud_project_{}'.format(idx))
+            PROJECT_LIST['cloud_project_{}'.format(idx)] = None
+            rift.auto.mano.assign_project_role_to_user(rw_project_proxy, 'rw-project:project-admin', 'admin', 'cloud_project_{}'
+                                                                        .format(idx), 'system', rw_rbac_int_proxy)
+        #Creating cloud accounts, uploading descriptors, instantiating NS
+        for project_name in PROJECT_LIST:
+            rift.auto.mano.create_cloud_account(mgmt_session, cloud_account, project_name)
+            for descriptor in descriptors:
+                rift.auto.descriptor.onboard(mgmt_session, descriptor, project=project_name)
+            admin_nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+            nsd_catalog = admin_nsd_pxy.get_config(fmt_nsd_catalog_xpath.format(project=quoted_key(project_name)))
+            assert nsd_catalog
+            nsd = nsd_catalog.nsd[0]
+            nsr = rift.auto.descriptor.create_nsr(cloud_account.name, nsd.name, nsd)
+            PROJECT_LIST[project_name] = nsr
+
+        for project_name, NSR in PROJECT_LIST.items():
+            admin_rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+            admin_rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
+            admin_rwvlr_pxy = mgmt_session.proxy(RwVlrYang)
+            instantiate_nsr_not_wait(NSR, admin_rwnsr_pxy,
+                                     project=project_name)
+
+        # Waiting for NS's to get started and configured.
+        for project_name in PROJECT_LIST:
+            admin_rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+            nsr_opdata = admin_rwnsr_pxy.get('/rw-project:project[rw-project:name={}]/ns-instance-opdata'.format(quoted_key(project_name)))
+            nsrs = nsr_opdata.nsr
+
+            for nsr in nsrs:
+                xpath = "/rw-project:project[rw-project:name={}]/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(
+                    quoted_key(project_name), quoted_key(nsr.ns_instance_config_ref))
+                admin_rwnsr_pxy.wait_for(xpath, "running", fail_on=['failed'], timeout=400)
+
+            for nsr in nsrs:
+                xpath = "/rw-project:project[rw-project:name={}]/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(
+                    quoted_key(project_name), quoted_key(nsr.ns_instance_config_ref))
+                admin_rwnsr_pxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=400)
+
+        # Letting the started NS's run for a minute after which is terminated
+        start_time = time.time()
+        while (time.time() - start_time) < 60:
+            time.sleep(2)
+        for project_name in PROJECT_LIST:
+            rift.auto.descriptor.terminate_nsr(
+                admin_rwvnfr_pxy, admin_rwnsr_pxy, admin_rwvlr_pxy, logger,
+                project=project_name)
+
+    def test_descriptor_nsr_persistence_check(
+            self, rw_conman_proxy, rw_user_proxy, rw_project_proxy,
+            cloud_account, cloud_module, mgmt_session, descriptors, logger,
+            user_domain, session_class, confd_host, rbac_user_passwd,
+            fmt_nsd_catalog_xpath, rw_rbac_int_proxy):
+        """Creates a project and cloud account for it. Uploads descriptors.
+        Logs in as project-admin and checks if the uploaded descriptors
+        are still there, after which he logs out.
+        Then instantiates nsr. Again logs in as project admin and checks
+        if the instantiated nsr is still there."""
+        # Creating a project, assigning project admin and creating
+        # a cloud account for the project
+        for idx in range(1,6):
+            rift.auto.mano.create_project(rw_conman_proxy, 'xcloud_project_{}'.format(idx))
+            rift.auto.mano.create_user(rw_user_proxy, 'project_admin_{}'.format(idx), rbac_user_passwd, user_domain)
+            rift.auto.mano.assign_project_role_to_user(rw_project_proxy, 'rw-project:project-admin', 'project_admin_{}'
+                                            .format(idx), 'xcloud_project_{}'.format(idx), user_domain, rw_rbac_int_proxy)
+            rift.auto.mano.create_cloud_account(mgmt_session, cloud_account, 'xcloud_project_{}'.format(idx))
+            #Uploading descriptors and verifying its existence from another user(project admin)
+            for descriptor in descriptors:
+                rift.auto.descriptor.onboard(mgmt_session, descriptor, project='xcloud_project_{}'.format(idx))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, 'project_admin_{}'.format(idx), rbac_user_passwd)
+            project_admin_nsd_pxy = user_session.proxy(RwProjectNsdYang)
+            nsd_catalog = project_admin_nsd_pxy.get_config(fmt_nsd_catalog_xpath.format(project=quoted_key('xcloud_project_{}'.format(idx))))
+            assert nsd_catalog, "Descriptor Not found on try no: {}".format(idx)
+            nsd = nsd_catalog.nsd[0]
+            nsr = rift.auto.descriptor.create_nsr(cloud_account.name, nsd.name, nsd)
+            rift.auto.mano.close_session(user_session)
+            #Instantiating the nsr and verifying its existence from another user(project admin), after which it gets terminated
+            admin_rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+            admin_rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
+            admin_rwvlr_pxy = mgmt_session.proxy(RwVlrYang)
+
+            rift.auto.descriptor.instantiate_nsr(nsr, admin_rwnsr_pxy, logger, project='xcloud_project_{}'.format(idx))
+            user_session = rift.auto.mano.get_session(session_class, confd_host, 'project_admin_{}'.format(idx), rbac_user_passwd)
+            pxy = user_session.proxy(RwNsrYang)
+            nsr_opdata = pxy.get('/rw-project:project[rw-project:name={}]/ns-instance-opdata'.format(quoted_key('xcloud_project_{}'.format(idx))))
+            nsrs = nsr_opdata.nsr
+            for nsr in nsrs:
+                xpath = "/rw-project:project[rw-project:name={}]/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(
+                                quoted_key('xcloud_project_{}'.format(idx)), quoted_key(nsr.ns_instance_config_ref))
+                pxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=60)
+            rift.auto.mano.close_session(user_session)
+            rift.auto.descriptor.terminate_nsr(
+                admin_rwvnfr_pxy, admin_rwnsr_pxy, admin_rwvlr_pxy, logger,
+                project='xcloud_project_{}'.format(idx))
+
+    def delete_records(self, nsd_proxy, vnfd_proxy, project_name='default'):
+        """Delete the NSD & VNFD records."""
+        nsds = nsd_proxy.get(
+            "/rw-project:project[rw-project:name={}]/nsd-catalog/nsd".format(
+                quoted_key(project_name)),
+            list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = (
+                "/rw-project:project[rw-project:name={}]".format(
+                    quoted_key(project_name)) +
+                "/nsd-catalog/nsd[id={}]".format(quoted_key(nsd.id))
+            )
+            nsd_proxy.delete_config(xpath)
+
+        nsds = nsd_proxy.get(
+            "/rw-project:project[rw-project:name={}]/nsd-catalog/nsd".format(
+                quoted_key(project_name)),
+            list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get(
+            "/rw-project:project[rw-project:name={}]/vnfd-catalog/vnfd".format(
+                quoted_key(project_name)),
+            list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = (
+                "/rw-project:project[rw-project:name={}]/".format(
+                    quoted_key(project_name)) +
+                "vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
+            )
+            vnfd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get(
+            "/rw-project:project[rw-project:name={}]/vnfd-catalog/vnfd".format(
+                quoted_key(project_name)),
+            list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
+
+    def test_delete_project_and_vim_accounts(
+            self, rw_conman_proxy, rw_user_proxy, logger,
+            rbac_user_passwd, user_domain, rw_project_proxy, rw_rbac_int_proxy,
+            mgmt_session, cloud_module, cloud_account, descriptors,
+            fmt_nsd_catalog_xpath, session_class, confd_host):
+        """Testing vim accounts."""
+        # Create a project and three cloud accounts for it.
+        rift.auto.mano.create_project(rw_conman_proxy, 'vim_project')
+        rift.auto.mano.assign_project_role_to_user(
+            rw_project_proxy, 'rw-project:project-admin', 'admin',
+            'vim_project', 'system', rw_rbac_int_proxy)
+        for idx in range(1, 4):
+            rift.auto.mano.create_cloud_account(
+                mgmt_session, cloud_account,
+                'vim_project', 'cloud_account_{}'.format(idx))
+        # Uploading descriptors
+        for descriptor in descriptors:
+            rift.auto.descriptor.onboard(
+                mgmt_session, descriptor, project='vim_project')
+        nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+        nsd_catalog = nsd_pxy.get_config(fmt_nsd_catalog_xpath.format(
+            project=quoted_key('vim_project')))
+        assert nsd_catalog
+        nsd = nsd_catalog.nsd[0]
+        nsr = rift.auto.descriptor.create_nsr(
+            'cloud_account_1', nsd.name, nsd)
+        # Instantiating the nsr
+        rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+        rift.auto.descriptor.instantiate_nsr(
+            nsr, rwnsr_pxy, logger, project='vim_project')
+        # Trying to delete the project before taking the instance down
+        with pytest.raises(
+                Exception,
+                message="Project deletion should've failed"):
+            rift.auto.mano.delete_project(rw_conman_proxy, 'vim_project')
+        # Trying to delete the vim account before taking the instance down
+        with pytest.raises(
+                Exception,
+                message="Vim account deletion should've failed"):
+            rift.auto.mano.delete_cloud_account(
+                mgmt_session, 'cloud_account_1', 'vim_project')
+        # Terminating the nsr
+        rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
+        rwvlr_pxy = mgmt_session.proxy(RwVlrYang)
+        rift.auto.descriptor.terminate_nsr(
+            rwvnfr_pxy, rwnsr_pxy, rwvlr_pxy, logger, project='vim_project')
+        # Delete all cloud accounts for the project
+        for idx in range(1, 4):
+            rift.auto.mano.delete_cloud_account(
+                mgmt_session, 'cloud_account_{}'.format(idx), 'vim_project')
+        # Delete the uploaded descriptors
+        vnfd_proxy = mgmt_session.proxy(RwProjectVnfdYang)
+        self.delete_records(nsd_pxy, vnfd_proxy, 'vim_project')
+        # Delete the project
+        rift.auto.mano.delete_project(rw_conman_proxy, 'vim_project')
+        # Check in rw-rbac-internal if project is removed
+        rwinternal_xpath = '/rw-rbac-internal/role'
+        response = (
+            rw_rbac_int_proxy.get(
+                rwinternal_xpath, list_obj=True)
+        ).as_dict()['role']
+        keys = [role['keys'] for role in response if 'keys' in role]
+        for key in keys:
+            assert 'vim_project' not in key, "Improper project deletion"
+
+    @pytest.mark.skipif(
+        not pytest.config.getoption("--complex-scaling"),
+        reason="need --complex-scaling option to run")
+    def test_complex_scaling(
+            self, rw_conman_proxy, rw_user_proxy, rbac_user_passwd,
+            user_domain, rw_project_proxy, rw_rbac_int_proxy, logger,
+            rbac_platform_proxy, user_roles, platform_roles, mgmt_session,
+            cloud_module, cloud_account, rw_ro_account_proxy,
+            tbac, fmt_nsd_catalog_xpath, descriptors, complex_scaling_factor):
+        """Complex scaling - Default values.
+
+        No. of projects - 25 (Two users & two cloud accounts per project)
+        No. of users - 50 (Two roles per user)
+        No. of cloud accounts - 50
+        No. of RO accounts - 25 (50 if you are considering the default 'rift').
+        """
+        # This test can be controlled using complex_scaling_factor fixture
+        logger.debug('Creating projects')
+        for idx in range(1, complex_scaling_factor + 1):
+            rift.auto.mano.create_project(
+                rw_conman_proxy, 'scaling_project_{}'.format(idx)
+            )
+        logger.debug('Create users, cloud accounts double the no. of projects')
+        for idx in range(1, (2 * complex_scaling_factor) + 1):
+            project_index = int((idx + 1) / 2)
+            rift.auto.mano.create_user(
+                rw_user_proxy, 'scaling_user_{}'.format(idx),
+                rbac_user_passwd, user_domain)
+            # Each user has a project role & platform role
+            pr_role = random.choice(user_roles)
+            pl_role = random.choice(platform_roles)
+            rift.auto.mano.assign_project_role_to_user(
+                rw_project_proxy, pr_role, 'scaling_user_{}'.format(idx),
+                'scaling_project_{}'.format(project_index), user_domain,
+                rw_rbac_int_proxy)
+            rift.auto.mano.assign_platform_role_to_user(
+                rbac_platform_proxy, pl_role,
+                'scaling_user_{}'.format(idx), user_domain, rw_rbac_int_proxy)
+            # Creating two cloud accounts for each project
+            rift.auto.mano.create_cloud_account(
+                mgmt_session, cloud_account,
+                'scaling_project_{}'.format(project_index),
+                'cloud_account_{}'.format(idx)
+            )
+        logger.debug('Creating RO accounts')
+        for idx in range(1, complex_scaling_factor + 1):
+            rift.auto.mano.create_ro_account(
+                rw_ro_account_proxy, 'ro_account_{}'.format(idx),
+                'scaling_project_{}'.format(idx)
+            )
+            # Uploading descriptors
+            for descriptor in descriptors:
+                rift.auto.descriptor.onboard(
+                    mgmt_session, descriptor,
+                    project='scaling_project_{}'.format(idx)
+                )
+            nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+            nsd_catalog = nsd_pxy.get_config(
+                fmt_nsd_catalog_xpath.format(
+                    project=quoted_key('scaling_project_{}'.format(idx))
+                )
+            )
+            assert nsd_catalog
+
+    @pytest.mark.skipif(
+        not pytest.config.getoption("--complex-scaling"),
+        reason="need --complex-scaling option to run")
+    def test_complex_scaling_verification(
+            self, complex_scaling_factor, rw_project_proxy, rw_ro_account_proxy,
+            mgmt_session, fmt_nsd_catalog_xpath, cloud_module, logger):
+        """Reboot verification script for test_complex_scaling."""
+        for idx in range(1, complex_scaling_factor + 1):
+            # Verifying projects
+            logger.debug('Verification: projects, ro accounts started')
+            project_name = 'scaling_project_{}'.format(idx)
+            project_cm_config_xpath = '/project[name={project_name}]/project-state'
+            project_ = rw_project_proxy.get_config(
+                project_cm_config_xpath.format(
+                    project_name=quoted_key(project_name)
+                ),
+                list_obj=True
+            )
+            assert project_
+            # Verifying RO Accounts
+            ro_account_name = 'ro_account_{}'.format(idx)
+            ro_obj = rw_ro_account_proxy.get_config(
+                '/project[name={}]/ro-account/account[name={}]'.format(
+                    quoted_key(project_name), quoted_key(ro_account_name))
+            )
+            assert ro_obj.name == ro_account_name
+            assert ro_obj.ro_account_type == 'openmano'
+            logger.debug('Verification: descriptors, cloud accounts started')
+            # Verifying Descriptors
+            nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+            nsd_catalog = nsd_pxy.get_config(
+                fmt_nsd_catalog_xpath.format(
+                    project=quoted_key(project_name)
+                )
+            )
+            assert nsd_catalog
+        for idx in range(1, (2 * complex_scaling_factor) + 1):
+            # Verifying cloud accounts
+            project_index = int((idx + 1) / 2)
+            project_name = 'scaling_project_{}'.format(project_index)
+            cloud_acc_name = 'cloud_account_{}'.format(idx)
+            fmt_cloud_xpath = (
+                '/project[name={project}]/cloud/account[name={account_name}]'
+            )
+            cloud_pxy = mgmt_session.proxy(cloud_module)
+            response = cloud_pxy.get(fmt_cloud_xpath.format(
+                project=quoted_key(project_name),
+                account_name=quoted_key(cloud_acc_name))
+            )
+            assert response.name == cloud_acc_name
+
+
+    def test_change_visibility_same_session(self, session_class, rw_conman_proxy, confd_host, logger,
+            user_domain, project_keyed_xpath, rw_project_proxy, rw_rbac_int_proxy, rw_user_proxy):
+        """admin make changes which is seen by the operator already logged in for the same project.
+
+        oper is logged in. admin assigns oper to a new project X. oper should be able to see the new project X being \
+        in the same session without re-logging-in.
+        """
+        user = 'oper2' if user_domain != 'default' else 'oper'
+        oper_user, oper_passwd = [user]*2
+        
+        if user_domain != 'default':
+            rift.auto.mano.create_user(rw_user_proxy, oper_user, oper_passwd, user_domain)
+            rift.auto.mano.assign_project_role_to_user(rw_project_proxy, 'rw-project:project-oper', oper_user,
+                                                       'default', user_domain, rw_rbac_int_proxy)
+        oper_session = rift.auto.mano.get_session(session_class, confd_host, oper_user, oper_passwd)
+        oper_conman_pxy = oper_session.proxy(RwProjectYang)
+
+        default_project_cm_config_xpath = project_keyed_xpath.format(project_name=quoted_key('default'))+'/project-state'
+        assert oper_conman_pxy.get_config(default_project_cm_config_xpath, list_obj=True)
+
+        # admin assigns oper 'project-admin' role under a new project
+        new_project = 'project_test_change_visibility_same_session_1'
+        rift.auto.mano.create_project(rw_project_proxy, new_project)
+        rift.auto.mano.assign_project_role_to_user(rw_project_proxy, 'rw-project:project-admin', oper_user, new_project,
+                                                   user_domain, rw_rbac_int_proxy)
+
+        # Check oper user should be able to access the new project
+        new_project_cm_config_xpath = project_keyed_xpath.format(project_name=quoted_key(new_project))+'/project-state'
+        assert oper_conman_pxy.get_config(new_project_cm_config_xpath, list_obj=True)
+
+    def test_super_admin(
+            self, rw_user_proxy, rbac_platform_proxy, rw_project_proxy,
+            session_class, confd_host, rbac_user_passwd, user_domain,
+            rw_rbac_int_proxy):
+        """Variou tests on the super-admin role."""
+        # Creating two super admins and then deleting the first one.
+        rift.auto.mano.create_user(
+            rw_user_proxy, 'super_admin', rbac_user_passwd, user_domain)
+        rift.auto.mano.assign_platform_role_to_user(
+            rbac_platform_proxy, 'rw-rbac-platform:super-admin',
+            'super_admin', user_domain, rw_rbac_int_proxy)
+        rift.auto.mano.create_user(
+            rw_user_proxy, 'super_admin_2', rbac_user_passwd, user_domain)
+        rift.auto.mano.assign_platform_role_to_user(
+            rbac_platform_proxy, 'rw-rbac-platform:super-admin',
+            'super_admin_2', user_domain, rw_rbac_int_proxy)
+
+        user_session = rift.auto.mano.get_session(
+            session_class, confd_host, 'super_admin_2', rbac_user_passwd)
+        pxy = user_session.proxy(RwRbacPlatformYang)
+        role_keyed_path = (
+            "/rbac-platform-config/" +
+            "user[user-name={user}][user-domain={domain}]"
+        )
+        pxy.delete_config(role_keyed_path.format(
+            user=quoted_key('super_admin'), domain=quoted_key(user_domain))
+        )
+        pxy = user_session.proxy(RwUserYang)
+        rift.auto.mano.delete_user(pxy, 'super_admin', user_domain)
+        rift.auto.mano.close_session(user_session)
+
+    @pytest.mark.skipif(not pytest.config.getoption("--tbac"), reason="need --tbac option to run")
+    def test_token_expiry_timeout(self, mgmt_session, rw_user_proxy, rw_conman_proxy, rbac_user_passwd, user_domain,
+        confd_host, logger, rw_project_proxy, rw_rbac_int_proxy, session_class):
+        """Set 30 seconds as token-expiry-timeout; then verifies an user session is automatically expired after 30 secs"""
+        test_user, role = 'user-1', 'rw-project:project-oper'
+        test_proj = 'project_test_token_expiry_timeout'
+        token_expiry_timeout = 30
+
+        logger.debug('Creating user {} under project {} and assigning it {}'.format(test_user, test_proj, role))
+        rift.auto.mano.create_project(rw_conman_proxy, test_proj)
+        rift.auto.mano.create_user(rw_user_proxy, test_user, rbac_user_passwd, user_domain)
+        rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, test_user, test_proj, user_domain, rw_rbac_int_proxy)
+
+        # admin user setting token_expiry_timeout
+        openidc_provider_xpath = '/rw-openidc-provider:openidc-provider-config'
+        openidc_provider = RwOpenidcProviderYang.YangData_RwOpenidcProvider_OpenidcProviderConfig.from_dict(
+                                                                {'token_expiry_timeout': 30})
+        pxy = mgmt_session.proxy(RwOpenidcProviderYang)
+        logger.debug('Settig token_expiry_timeout to {} secs'.format(token_expiry_timeout))
+        pxy.replace_config(openidc_provider_xpath, openidc_provider)
+
+        # Verifying if token_expiry_timeout is set in openidc-provider-config
+        openidc_provider = pxy.get_config(openidc_provider_xpath)
+        assert openidc_provider
+        assert openidc_provider.token_expiry_timeout == token_expiry_timeout
+
+        def project_access(user_session):
+            user_conman_pxy = user_session.proxy(RwProjectYang)
+            assert user_conman_pxy.get_config('/project[name={}]/project-state'.format(quoted_key(test_proj)), list_obj=True)
+
+        # Log-in as test_user and validate operations under that user getting 'Unauthorized' after time-out
+        user_session = rift.auto.mano.get_session(session_class, confd_host, test_user, rbac_user_passwd)
+        project_access(user_session)
+
+        logger.debug('Sleeping for {} secs'.format(token_expiry_timeout))
+        time.sleep(token_expiry_timeout+5)
+
+        with pytest.raises(Exception, message='logged-in user able to access default project even after token expired'):
+            logger.debug('User {} trying to access default project. It should fail')
+            project_access(user_session)
+
+        # log-in as same user and perform the same operation. It should pass now.
+        user_session = rift.auto.mano.get_session(session_class, confd_host, test_user, rbac_user_passwd)
+        project_access(user_session)
diff --git a/rwlaunchpad/ra/pytest/ns/rbac/test_tbac_token.py b/rwlaunchpad/ra/pytest/ns/rbac/test_tbac_token.py
new file mode 100644
index 0000000..9db5ca1
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/rbac/test_tbac_token.py
@@ -0,0 +1,542 @@
+#!/usr/bin/env python3
+"""
+#
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+"""
+# RIFT_IO_STANDARD_COPYRIGHT_HEADER(BEGIN)
+# Author(s): Balaji Rajappa, Vishnu Narayanan K.A
+# Creation Date: 2017-07-07
+# RIFT_IO_STANDARD_COPYRIGHT_HEADER(END)
+
+import gi
+import json
+import urllib.parse
+
+import rift.auto.mano
+import pytest
+import tornado.httpclient
+import time
+import Cryptodome.PublicKey.RSA as RSA
+
+import oic.utils.jwt as oic_jwt
+import oic.utils.keyio as keyio
+from jwkest.jwk import RSAKey
+from rift.rwlib.util import certs
+gi.require_version('RwOpenidcProviderYang', '1.0')
+gi.require_version('RwRbacInternalYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwKeyspec', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+from gi.repository import ( # noqa
+    RwOpenidcProviderYang,
+    RwProjectNsdYang,
+    RwProjectYang,
+    RwRbacInternalYang,
+    RwConmanYang,
+)
+from gi.repository.RwKeyspec import quoted_key # noqa
+
+
+PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAs9bRFjWofNeWq2qtsvH9iDZXXbv5NQI6avK1hSt+0W0g3SXW
+hllNenZAhFpXHzZvJk2qEoNIRXIeonX4N62FBLD7ZoWHQDGahkyfhxML4jYA3KUa
+PWGeUvMlRPkoR4NjHA3zXQvD2FwTtcKCulGYQHRAAyATIcNq0kKZMuMAJxC5A7VD
+vQVb7vOaN01YxJt+L6KF0v4ZiYdse5yBI/X58i2gnLqy102Oqj2qZygazj5LLdTE
+sjgsiC9ln6kesbRayXiqi+RnF+BeKKlwGCuUpH+vFGxXmT6Kr4iEiGIHxAs/HZOS
+9m61z1eHjrce654mpqwbeqhsyQZswyab2IpERwIDAQABAoIBABrnK+gypr3mUnfa
+QZnfcZoK5w7yq9kuOCb/oAAH/bS+qofqvSjj+x8yyXwDN71Hm2EThTm3wfwBkmuj
+UjqiDqAhCbrQhucnhIJKvCKsyr5QbdiUKlS8Yb7u+MhUrZ3lHdJ4k8t7kxSu0ZQD
+QSM2SZx6x4iwJ6yJW1WQ+PIP21n8ejraQ9PzqpuUsNXh05DU8qN/nJHe311D5ZuB
+UnSHdfGaF+EBbNxPLzV028db+L9m3a+h87uZhyqwRlUXP+swlToVNvF74bs+mflz
+r5JN6CwRM3VamnwmcnE77D/zyCsP1Js9LgoxhzhdcUwIOYVWRzUUVRCsrtYOSGF7
+WBzC3WECgYEA0hGtnBw5rryubv0kWDjZoVGvuwDo7BOW1JFXZYJwvweEj7EjWFTY
+bVk+MYs1huG+0NpNuhw6IYmDPIEkoLVNGuTHBMnA+SzQx/xv719b1OmY0Wl8ikYd
+Xlmhxr7mjAJX4eqkVTrBGtsi6TCLdk3HnUdpXJQ0k2aUN6hNFJfsmhUCgYEA2ykP
+hdVzP1ZtXsHEfHSOfRPIzX9gCLETghntAf44MCF+hHZeEVnuTSrfeqELvy5qCarA
+FgjZ77p7q6R7YP2KBQUc/gzZStjGIOCPv9xI8otXrmQRVXOxWNafeDp+TOPa2o9S
+2bBovNmN4Kc+ayktATCVuabMbuGiMIPuRY1pR+sCgYEAmdJSEw7j+hy1ihYZJ/Sw
+/5xmFoQLCtspRgwLOAx07Jzfp6xpGkQ+mouPrA2oq1TgOeSwp8gFlQsxqvtRy9AW
+XswJI2tsv8jeNKKXgGuOPfCzcxxQEpxW4wC1ImglP35zxbzginxUbIrsHF7ssDsy
+IOvqrdzkRs8FV2AI2TyKByUCgYEAuhdDdwCnu0BH3g3qKUNPOiVyfAuMH9U8G1yo
+Quj6DORj6VYYyeLy1dNxr07QCqX+o/a44/zgEQ7ns/cWTGT8rQaKd62xVDx8/62u
+YdtKlah76zhM/6IdFLIo9o20cNWJH8xTLUT9ql2QexGHjraH4FrAx8M6E2zDqy5b
+Q/OvUcECgYAjt8XosvUiRpZ1ugMxwAx316IIEgs2u7k4xdQESnVhIOM3Ex5ikXkK
+I0Hu/2XPH3KO6+6BOhtdZ4qXLf4hikbIisgq3P87Fb2rUElYZjVm3vClYhEzx6ym
+bSWO/cZTpp9L14qMuWzb13pD20GExPOIBh1m0exvoL3M8JhLShutWw==
+-----END RSA PRIVATE KEY-----"""
+
+PUBLIC_KEY = """-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAs9bRFjWofNeWq2qtsvH9
+iDZXXbv5NQI6avK1hSt+0W0g3SXWhllNenZAhFpXHzZvJk2qEoNIRXIeonX4N62F
+BLD7ZoWHQDGahkyfhxML4jYA3KUaPWGeUvMlRPkoR4NjHA3zXQvD2FwTtcKCulGY
+QHRAAyATIcNq0kKZMuMAJxC5A7VDvQVb7vOaN01YxJt+L6KF0v4ZiYdse5yBI/X5
+8i2gnLqy102Oqj2qZygazj5LLdTEsjgsiC9ln6kesbRayXiqi+RnF+BeKKlwGCuU
+pH+vFGxXmT6Kr4iEiGIHxAs/HZOS9m61z1eHjrce654mpqwbeqhsyQZswyab2IpE
+RwIDAQAB
+-----END PUBLIC KEY-----"""
+
+WRONG_PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEA230Ic8gqYGrIYPffrgvS9ezrI94+TMwIX0A3nyi6nRBOAzuV
+OMP0L4OegDLnAkyUC4ZiH6B9uAJ1mbp4WsX0Q2a3FuGzscCfriV0JKRd4256Mj60
+bGq7xLqR/d62IzLrQ2eJCQe2IspwUIeAW301igwoPIGTfZurQ6drXBcbRVo7adry
+V3+TGsfQVge95IyVAPm4A7kcJsdQu9HsD7Hp9LIM35B3oHCOF7hHP/MEEAz84Q6q
+lpWxdTzSnIxDXWxS2BqPInKOIL5egpn69AfJKLj+QPpQymULx3FCeNKeHmSICHtP
+r0uTckEek0kfFT2W6hIU1w1f+Pkddhc1fY45VQIDAQABAoIBABvOsHZywqOqg659
+WPJk/xo3JOdLbdsu8lSW/zUD5PinKysPrm0drl8irr8RM+E/sHXxVZcqLyNT9HBA
+hqUBdVvgtIuKlsiLXe+jQR6vUFHTGlopRZSCxT08YeinAa5d8h59DIh/WJz5xtb9
+A88Tguf1eFeKFxSP11ff6yMkrkjP1KmvNRoTAC0MU3p/N6UT03roR9v6n4qGPF6p
+/fy6uhLWSJVl7IGFL18DEODid64ShK37VytnvLAMQp8OzL87OdoUW6qrA+z4FP59
+XSpXULxn6ayJG3VChT+Y+nb23rC6gzCYYb3qkSwep2xNqfblP8jL2k/NSlbshdiz
+j3BfK8ECgYEA6D7SMCXZ2hBYu8EBoGRmMLdtM+spps61JOAhgy2i9aNQ/YlKfuS9
+kvNFqT1DEpQsjcRmZIEVb5uJQJYUDx6zj4eUSzkISvziz43dg4RKpC/ktprp9RQ1
+8sAQD4n5Xy2chdTQHKfGl4oF5b16wpi0eE97XptDOlLgPhk167woUQUCgYEA8fAt
+8uZxw0aKkQbF+tYItsWQQP87dJGUeLna4F3T6q5L5WJYCtFqILiFfWwfcjEaOKWV
+JzKr0f9pLrRxXYdFUxNolOhA1hZCqZu2ZzpSlfsPWhp2WflGi6DqzSByhgVuwHbV
+pRl0TRE2dQVgpuXxxiURREHoHJPZRc+3sOwU+BECgYAZJXQssmx8J/jzm1pJu5U1
+ASdZz8Sawxbp/zqhsXdLkXtbeFoQk0PTfXO1d2Sjxldsoi9UAoYHp5ec3qMdX/2h
+NNThsDMtq2QDhSDO9KwASw9AllVuq9mLhzA1/oJ5w76G3xwJfkEKd29cCMAaAd7I
+iBKbk8QbtI2DK8ei1qSm4QKBgAPHvPAOqbhjYcbiVDWXIou4ioh5dHRd0fQQ81qO
+HMGN96Gd58JDg2T/fRZ4mgUuvzojXDFAmW6ujvYr25mag3rI0tmAx4KQ1nnP9Qmn
+36J4ScUepLrDKlcELKcH2sI9U32uXag2vZp2qmMpsljpPt3ZtmtanEXWCY8Nr9ET
+30ABAoGAQ63wGwq1LPS6t/zU6CwOlIzGNnHDquO7o1o/h8IPt3BN6yF0NEVItjdi
+fL2ZwmBCUbO6Y/Jb1kh4a0iohWF33nS3J4Q6wSQUfBMG5jDI7GfuKAgTQl+sMkOM
+xjyKrWs/y7HtiP/2vf83QVEL8Bxr3WXdXHj1EBHFEMWA576J6mk=
+-----END RSA PRIVATE KEY-----"""
+
+roles = (
+    'rw-rbac-platform:super-admin', 'rw-project:project-admin',
+    'rw-project-mano:catalog-admin', 'rw-project:project-oper'
+)
+
+
+class Jwt:
+    """Jwt."""
+
+    def __init__(
+            self, private_key=None, public_key=None,
+            iss=None, sub=None, aud=None):
+        """__init___."""
+        self.iss = iss
+        self.sub = sub
+        self.aud = aud
+        self.keyjar = keyio.KeyJar()
+        if private_key:
+            self._add_key_to_keyjar(private_key)
+        if public_key:
+            self._add_key_to_keyjar(public_key, owner=self.iss)
+
+    def _add_key_to_keyjar(self, pkey, owner=''):
+        kb = keyio.KeyBundle()
+        priv_key = RSA.importKey(pkey)
+        key = RSAKey().load_key(priv_key)
+        key.use = "sig"
+        kb.append(key)
+        self.keyjar.add_kb(owner, kb)
+
+    def sign_jwt(self):
+        """sign_jwt."""
+        jwt = oic_jwt.JWT(self.keyjar, iss=self.iss)
+        jws = jwt.pack(sub=self.sub, aud=self.aud)
+        return jws
+
+    def verify(self, jws):
+        """verify."""
+        jwt = oic_jwt.JWT(self.keyjar)
+        return jwt.unpack(jws)
+
+TOKEN_URL = "https://{}:8009/token"
+REVOKE_URL = "https://{}:8009/revoke"
+REST_GET_LOG_CONFIG = "https://{}:8008/api/running/logging"
+
+
+class State:
+    """State."""
+
+    def __init__(self):
+        """__init___."""
+        self.access_token = None
+        _, self.cert, _ = certs.get_bootstrap_cert_and_key()
+
+    def teardown(self):
+        """teardown."""
+        print("\n=== Done with Tests ===")
+
+
+@pytest.fixture(scope="session")
+def state():
+    """state."""
+    st = State()
+    yield st
+    st.teardown()
+
+
+@pytest.mark.incremental
+class TestJwtBearer:
+    """TestJwtBearer."""
+
+    def generate_keys(self, key_format='PEM'):
+        """Generate private & public keys."""
+        private = RSA.generate(2048)
+        pri_key = private.exportKey('PEM')
+        private_key = pri_key.decode('utf-8')
+        public = private.publickey()
+        pub_key = public.exportKey(key_format)
+        public_key = pub_key.decode('utf-8')
+        return private_key, public_key
+
+    def test_client_config(
+            self, rw_user_proxy, rbac_user_passwd, user_domain,
+            rbac_platform_proxy, rw_rbac_int_proxy, mgmt_session):
+        """Setting the public key in config."""
+        client_id = '111'
+        rift.auto.mano.create_user(
+            rw_user_proxy, 'test', rbac_user_passwd, user_domain)
+        rift.auto.mano.assign_platform_role_to_user(
+            rbac_platform_proxy, 'rw-rbac-platform:super-admin', 'test',
+            user_domain, rw_rbac_int_proxy)
+        openidc_xpath = (
+            '/rw-openidc-provider:openidc-provider-config/' +
+            'rw-openidc-provider:openidc-client' +
+            '[rw-openidc-provider:client-id={}]'.format(quoted_key(client_id))
+        )
+        config_object = (
+            RwOpenidcProviderYang.
+            YangData_RwOpenidcProvider_OpenidcProviderConfig_OpenidcClient.
+            from_dict({
+                'client_id': client_id,
+                'client_name': 'test',
+                'user_name': 'test',
+                'user_domain': user_domain,
+                'public_key': PUBLIC_KEY}))
+        rw_open_idc_proxy = mgmt_session.proxy(RwOpenidcProviderYang)
+        rw_open_idc_proxy.create_config(openidc_xpath, config_object)
+
+    def test_get_token(self, state, confd_host):
+        """Get the token."""
+        jwt = Jwt(private_key=PRIVATE_KEY, iss="111",
+                  sub="test", aud="https://{}:8009".format(confd_host))
+        jws = jwt.sign_jwt()
+        body_tuple = (
+            ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+            ("assertion", jws),
+        )
+
+        req = tornado.httpclient.HTTPRequest(
+            url=TOKEN_URL.format(confd_host),
+            method='POST',
+            headers={"Content-Type": "application/x-www-form-urlencoded"},
+            ca_certs=state.cert,
+            body=urllib.parse.urlencode(body_tuple)
+        )
+        client = tornado.httpclient.HTTPClient()
+        resp = client.fetch(req)
+        token_resp = json.loads(resp.body.decode('utf-8'))
+        assert "access_token" in token_resp
+        state.access_token = token_resp["access_token"]
+
+    def test_api_access(self, state, confd_host):
+        """Test api access."""
+        assert state.access_token is not None
+        req = tornado.httpclient.HTTPRequest(
+            url=REST_GET_LOG_CONFIG.format(confd_host),
+            headers={
+                "Authorization": "Bearer " + state.access_token,
+                "Accept": "application/json",
+            },
+            ca_certs=state.cert,
+        )
+        client = tornado.httpclient.HTTPClient()
+        resp = client.fetch(req)
+        assert resp.code == 200 or resp.code == 204
+
+    def test_revoke_token(self, state, confd_host):
+        """Revoke a token."""
+        assert state.access_token is not None
+        body_tuple = (
+            ("token", state.access_token),
+            ("token_type_hint", "access_token"),
+        )
+        req = tornado.httpclient.HTTPRequest(
+            url=REVOKE_URL.format(confd_host),
+            method='POST',
+            headers={
+                "Authorization": "Bearer " + state.access_token,
+                "Content-Type": "application/x-www-form-urlencoded",
+            },
+            ca_certs=state.cert,
+            body=urllib.parse.urlencode(body_tuple)
+        )
+        client = tornado.httpclient.HTTPClient()
+        client.fetch(req)
+
+    def test_api_access_invalid_token(self, state, confd_host):
+        """Test access with invalid token."""
+        assert state.access_token is not None
+        req = tornado.httpclient.HTTPRequest(
+            url=REST_GET_LOG_CONFIG.format(confd_host),
+            headers={
+                "Authorization": "Bearer " + state.access_token,
+                "Accept": "application/json",
+            },
+            ca_certs=state.cert,
+        )
+        client = tornado.httpclient.HTTPClient()
+        with pytest.raises(tornado.httpclient.HTTPError) as excinfo:
+            client.fetch(req)
+        assert excinfo.value.code == 401
+        state.access_token = None
+
+    def test_invalid_client_id(self, state, confd_host):
+        """Test with invalid client-id."""
+        jwt = Jwt(private_key=PRIVATE_KEY, iss="invalid_client",
+                  sub="test", aud="https://{}:8009".format(confd_host))
+        jws = jwt.sign_jwt()
+        body_tuple = (
+            ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+            ("assertion", jws),
+        )
+
+        req = tornado.httpclient.HTTPRequest(
+            url=TOKEN_URL.format(confd_host),
+            method='POST',
+            headers={"Content-Type": "application/x-www-form-urlencoded"},
+            ca_certs=state.cert,
+            body=urllib.parse.urlencode(body_tuple)
+        )
+        client = tornado.httpclient.HTTPClient()
+        with pytest.raises(tornado.httpclient.HTTPError) as excinfo:
+            client.fetch(req)
+        assert excinfo.value.code == 400
+
+    def test_invalid_key(self, state, confd_host):
+        """Test with invalid key."""
+        jwt = Jwt(private_key=WRONG_PRIVATE_KEY, iss="111",
+                  sub="test", aud="https://{}:8009".format(confd_host))
+        jws = jwt.sign_jwt()
+        body_tuple = (
+            ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+            ("assertion", jws),
+        )
+
+        req = tornado.httpclient.HTTPRequest(
+            url=TOKEN_URL.format(confd_host),
+            method='POST',
+            headers={"Content-Type": "application/x-www-form-urlencoded"},
+            ca_certs=state.cert,
+            body=urllib.parse.urlencode(body_tuple)
+        )
+        client = tornado.httpclient.HTTPClient()
+        with pytest.raises(tornado.httpclient.HTTPError) as excinfo:
+            client.fetch(req)
+        assert excinfo.value.code == 400
+
+    def test_invalid_user(self, state, confd_host):
+        """Test with invalid user."""
+        jwt = Jwt(private_key=PRIVATE_KEY, iss="111",
+                  sub="invalid_user", aud="https://{}:8009".format(confd_host))
+        jws = jwt.sign_jwt()
+        body_tuple = (
+            ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+            ("assertion", jws),
+        )
+
+        req = tornado.httpclient.HTTPRequest(
+            url=TOKEN_URL.format(confd_host),
+            method='POST',
+            headers={"Content-Type": "application/x-www-form-urlencoded"},
+            ca_certs=state.cert,
+            body=urllib.parse.urlencode(body_tuple)
+        )
+        client = tornado.httpclient.HTTPClient()
+        with pytest.raises(tornado.httpclient.HTTPError) as excinfo:
+            client.fetch(req)
+        assert excinfo.value.code == 400
+
+    def test_check_basic_functionality(
+            self, rw_user_proxy, rbac_user_passwd, user_domain, state,
+            rbac_platform_proxy, rw_rbac_int_proxy, mgmt_session,
+            session_class, confd_host, rw_project_proxy, cloud_module,
+            cloud_account, descriptors, fmt_nsd_catalog_xpath, logger):
+        """Check basic functionality."""
+        # Add the users to our config with the public key.
+        logger.debug('Create users and add roles for them')
+        for idx in range(1, 5):
+            client_id = '111{}'.format(idx)
+            user_name = 'test_{}'.format(idx)
+            role = roles[idx - 1]
+            rift.auto.mano.create_user(
+                rw_user_proxy, user_name, rbac_user_passwd, user_domain)
+            if 'platform' in role:
+                rift.auto.mano.assign_platform_role_to_user(
+                    rbac_platform_proxy, role, user_name,
+                    user_domain, rw_rbac_int_proxy)
+            else:
+                rift.auto.mano.assign_project_role_to_user(
+                    rw_project_proxy, role, user_name,
+                    'default', user_domain, rw_rbac_int_proxy)
+            openidc_xpath = (
+                '/rw-openidc-provider:openidc-provider-config/' +
+                'rw-openidc-provider:openidc-client[rw-openidc-provider:' +
+                'client-id={}]'.format(quoted_key(client_id))
+            )
+            # Generate PEM keys for some, while for others its openssh keys
+            logger.debug('Generate private & public keys for the user')
+            if idx % 2 == 0:
+                key_format = 'OpenSSH'
+            else:
+                key_format = 'PEM'
+            private_key, public_key = self.generate_keys(key_format)
+            config_object = (
+                RwOpenidcProviderYang.
+                YangData_RwOpenidcProvider_OpenidcProviderConfig_OpenidcClient.
+                from_dict({
+                    'client_id': client_id,
+                    'client_name': user_name,
+                    'user_name': user_name,
+                    'user_domain': user_domain,
+                    'public_key': public_key}))
+            rw_open_idc_proxy = mgmt_session.proxy(RwOpenidcProviderYang)
+            rw_open_idc_proxy.create_config(openidc_xpath, config_object)
+            # Create the jason web signature
+            jwt = Jwt(private_key=private_key, iss=client_id,
+                      sub=user_name, aud="https://{}:8009".format(confd_host))
+            jws = jwt.sign_jwt()
+            body_tuple = (
+                ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+                ("assertion", jws),
+            )
+            # Get the token using the signature
+            req = tornado.httpclient.HTTPRequest(
+                url=TOKEN_URL.format(confd_host),
+                method='POST',
+                headers={"Content-Type": "application/x-www-form-urlencoded"},
+                ca_certs=state.cert,
+                body=urllib.parse.urlencode(body_tuple)
+            )
+            client = tornado.httpclient.HTTPClient()
+            resp = client.fetch(req)
+            token_resp = json.loads(resp.body.decode('utf-8'))
+            assert "access_token" in token_resp
+            access_token = token_resp["access_token"]
+            user_session = rift.auto.mano.get_session(
+                session_class, confd_host, user_name,
+                rbac_user_passwd, access_token=access_token)
+            rw_rbac_internal_proxy = user_session.proxy(RwRbacInternalYang)
+            # According to the role, checking the functionality
+            if role == 'rw-rbac-platform:super-admin':
+                project_pxy = user_session.proxy(RwProjectYang)
+                rift.auto.mano.assign_project_role_to_user(
+                    project_pxy, 'rw-project:project-admin', 'oper', 'default',
+                    'system', rw_rbac_internal_proxy)
+            elif role == 'rw-project:project-admin':
+                logger.debug('Creating cloud account.')
+                rift.auto.mano.create_cloud_account(
+                    user_session, cloud_account)
+            elif role == 'rw-project-mano:catalog-admin':
+                logger.debug('Uploading descriptors')
+                for descriptor in descriptors:
+                    rift.auto.descriptor.onboard(
+                        user_session, descriptor, project='default')
+                nsd_pxy = user_session.proxy(RwProjectNsdYang)
+                nsd_catalog = nsd_pxy.get_config(
+                    fmt_nsd_catalog_xpath.format(
+                        project=quoted_key('default')))
+                assert nsd_catalog
+            else:
+                project_xpath = '/project[name={project_name}]/project-state'
+                rw_project_proxy = user_session.proxy(RwProjectYang)
+                project = rw_project_proxy.get_config(
+                    project_xpath.format(project_name=quoted_key('default')), list_obj=True)
+                assert project
+
+    def test_with_expired_token(
+            self, state, rw_user_proxy, rbac_user_passwd, user_domain,
+            rbac_platform_proxy, rw_rbac_int_proxy, mgmt_session,
+            session_class, confd_host, cloud_module, cloud_account,
+            logger):
+        """Test with an expired token."""
+        # Set the expiry time for the token as 'expiry_timeout' seconds.
+        client_id = '222'
+        user_name = 'expired_token_user'
+        expiry_timeout = 1
+        rift.auto.mano.create_user(
+            rw_user_proxy, user_name, rbac_user_passwd, user_domain)
+        rift.auto.mano.assign_platform_role_to_user(
+            rbac_platform_proxy, 'rw-rbac-platform:super-admin', user_name,
+            user_domain, rw_rbac_int_proxy)
+
+        openidc_provider_xpath = '/rw-openidc-provider:openidc-provider-config'
+        openidc_provider = (
+            RwOpenidcProviderYang.
+            YangData_RwOpenidcProvider_OpenidcProviderConfig.from_dict({
+                'token_expiry_timeout': expiry_timeout}))
+        pxy = mgmt_session.proxy(RwOpenidcProviderYang)
+        pxy.replace_config(openidc_provider_xpath, openidc_provider)
+
+        # Verify if token_expiry_timeout is set in openidc-provider-config
+        openidc_provider = pxy.get_config(openidc_provider_xpath)
+        assert openidc_provider
+        assert openidc_provider.token_expiry_timeout == expiry_timeout
+        # Set the public key in our config
+        openidc_xpath = (
+            '/rw-openidc-provider:openidc-provider-config/' +
+            'rw-openidc-provider:openidc-client' +
+            '[rw-openidc-provider:client-id={}]'.format(quoted_key(client_id))
+        )
+        config_object = (
+            RwOpenidcProviderYang.
+            YangData_RwOpenidcProvider_OpenidcProviderConfig_OpenidcClient.
+            from_dict({
+                'client_id': client_id,
+                'client_name': user_name,
+                'user_name': user_name,
+                'user_domain': user_domain,
+                'public_key': PUBLIC_KEY}))
+        rw_open_idc_proxy = mgmt_session.proxy(RwOpenidcProviderYang)
+        rw_open_idc_proxy.create_config(openidc_xpath, config_object)
+        # Generate the signature
+        jwt = Jwt(private_key=PRIVATE_KEY, iss=client_id,
+                  sub=user_name, aud="https://{}:8009".format(confd_host))
+        jws = jwt.sign_jwt()
+        body_tuple = (
+            ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+            ("assertion", jws),
+        )
+        logger.debug('Get the token using the signature')
+        req = tornado.httpclient.HTTPRequest(
+            url=TOKEN_URL.format(confd_host),
+            method='POST',
+            headers={"Content-Type": "application/x-www-form-urlencoded"},
+            ca_certs=state.cert,
+            body=urllib.parse.urlencode(body_tuple)
+        )
+        client = tornado.httpclient.HTTPClient()
+        resp = client.fetch(req)
+        token_resp = json.loads(resp.body.decode('utf-8'))
+        assert "access_token" in token_resp
+        access_token = token_resp["access_token"]
+        # Wait out the expiry timout
+        user_session = rift.auto.mano.get_session(
+            session_class, confd_host, user_name,
+            rbac_user_passwd, access_token=access_token)
+        time.sleep(expiry_timeout + 5)
+        with pytest.raises(
+            Exception,
+                message='Task done with expired token'):
+            user_conman_pxy = user_session.proxy(RwProjectYang)
+            assert user_conman_pxy.get_config(
+                '/project[name={}]/project-state'.format(quoted_key('default')), list_obj=True)
diff --git a/rwlaunchpad/ra/pytest/ns/restapitest/test_inputs/test_inputs.json b/rwlaunchpad/ra/pytest/ns/restapitest/test_inputs/test_inputs.json
new file mode 100644
index 0000000..470bb77
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/restapitest/test_inputs/test_inputs.json
@@ -0,0 +1,38 @@
+{
+    "uint64": [0, 1, -1, "riftio", "riftio@riftio.com",
+               922337203685477580717263457123647172364, "", null,
+               "rif~t¶*¤500"],
+    "uint32": [0, 1, -1, "riftio", "riftio@riftio.com",
+               922337203685477580717263457123647172364, "", null,
+               "rif~t¶*¤500"],
+    "uint16": [0, 1, -1, "riftio", "riftio@riftio.com",
+               922337203685477580717263457123647172364, "", null,
+               "rif~t¶*¤500"],
+    "uint8": [0, 1, -1, "riftio", "riftio@riftio.com",
+              922337203685477580717263457123647172364, "", null,
+              "rif~t¶*¤500"],
+    "decimal64": [0, 1, -1, "riftio", "riftio@riftio.com",
+                  922337203685477580.717263457123647172364, "", null,
+                  "rif~t¶*¤500"],
+    "int64": [0, 1, -1, "riftio", "riftio@riftio.com",
+              922337203685477580717263457123647172364, "", null,
+              "rif~t¶*¤500"],
+    "int32": [0, 1, -1, "riftio", "riftio@riftio.com",
+              922337203685477580717263457123647172364, "", null,
+              "rif~t¶*¤500"],
+    "int16": [0, 1, -1, "riftio", "riftio@riftio.com",
+              922337203685477580717263457123647172364, "", null,
+              "rif~t¶*¤500"],
+    "int8": [0, 1, -1, "riftio", "riftio@riftio.com",
+             922337203685477580717263457123647172364, "", null,
+             "rif~t¶*¤500"],
+    "string": [0, 1, -1, "riftio", "riftio@riftio.com",
+               922337203685477580717263457123647172364,
+               1313213.1321313, "~~&^%*()", "", null,
+               "rif~t¶*¤500"],
+    "union": ["1.1.1.1", null, 0, 1, -1,
+              22337203685477580717263457123647172364,
+              1313213.1321313, "~~&^%*()", "", null,
+              "rif~t¶*¤500", "256.256.256.256",
+              "0.0.0.0"]
+}
diff --git a/rwlaunchpad/ra/pytest/ns/restapitest/test_project_restapi.py b/rwlaunchpad/ra/pytest/ns/restapitest/test_project_restapi.py
new file mode 100644
index 0000000..6857570
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/restapitest/test_project_restapi.py
@@ -0,0 +1,308 @@
+# !/usr/bin/env python
+"""
+#
+#   Copyright 2017 RIFT.io Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@author Anoop Valluthadam (anoop.valluthadam@riftio.com), Vishnu Narayanan K.A
+@brief Create/Delete/Other operations of Projects and User
+"""
+
+import os
+
+from utils.imports import * # noqa
+from utils.traversal_engine import traverse_it
+from utils.utils import parse_input_data
+from utils.tbac_token_utils import * # noqa
+
+headers = {'content-type': 'application/json'}
+
+
+class TestRestAPI(object):
+    """TestRestAPI."""
+
+    def traverse_and_find_all_keys(self, it, key_dict):
+        """Find all keys and their data types present in the json schema.
+
+        Args:
+            it (dict): the json
+            key_dict (dict): will be populated with the keys & their datatypes
+        Returns:
+            key_dict (dict): will be populated with the keys & their datatypes
+        """
+        if (isinstance(it, list)):
+            for item in it:
+                self.traverse_and_find_all_keys(item, key_dict)
+            return key_dict
+
+        elif (isinstance(it, dict)):
+            for key in it.keys():
+                if key == 'name' and 'data-type' in it:
+                    if isinstance(it['data-type'], dict):
+                        dtype = next(iter(it['data-type']))
+                        if ((it[key] in key_dict) and
+                                (dtype not in key_dict[it[key]])):
+
+                            key_dict[it[key]].append(dtype)
+
+                        elif it[key] not in key_dict:
+                            key_dict[it[key]] = [dtype]
+                        else:
+                            pass
+                    else:
+                        if ((it[key] in key_dict) and
+                                (it['data-type'] not in key_dict[it[key]])):
+
+                            key_dict[it[key]].append(it['data-type'])
+
+                        elif it[key] not in key_dict:
+                            key_dict[it[key]] = [it['data-type']]
+                        else:
+                            pass
+                self.traverse_and_find_all_keys(it[key], key_dict)
+            return key_dict
+        else:
+            return None
+
+    def create_post_call(
+            self, data, confd_host, url, logger, state, number_of_tests):
+        """Create the POST.
+
+        Args:
+            data (dict): JSON data
+            confd_host (string): IP addr of the Launchpad
+            url (string): the url for the post call
+            logger (logger Object): log object
+            state: for the tbac token
+            number_of_tests (list): test & error cases count
+        Returns:
+            number_of_tests (list): test & error cases count
+        Raises:
+            requests.exceptions.ConnectionError: in case we loose connection
+            from the Launchpad, mostly when Launchpad crashes
+
+        """
+        number_of_tests[0] += 1
+
+        key = next(iter(data))
+        if 'project' in url:
+            name = str(data[key][0]["name"])
+            new_url = url + name
+        elif 'user-config' in url:
+            name = str(data[key]['user'][0]['user-name'])
+            domain = str(data[key]['user'][0]['user-domain'])
+            data = data['rw-user:user-config']
+            new_url = url + '/user/' + name + ',' + domain
+        else:
+            raise Exception('Something wrong with the URL')
+
+        logger.debug(data)
+        headers['Authorization'] = 'Bearer ' + state.access_token
+        try:
+            create_result = state.session.post(
+                url, data=json.dumps(data),
+                headers=headers, verify=False)
+            get_result = state.session.get(
+                new_url,
+                headers=headers, verify=False)
+            delete_result = state.session.delete(
+                new_url,
+                headers=headers, verify=False)
+        except requests.exceptions.ConnectionError:
+            logger.error('Crashed for the data: \n{}'.format(data))
+            number_of_tests[1] += 1
+            exit(1)
+
+        logger.debug(
+            'create result:\n{}\n{}\n'.format(
+                create_result.status_code, create_result.text))
+        logger.debug(
+            'get result:\n{}\n{}\n'.format(
+                get_result.status_code, get_result.text))
+        logger.debug(
+            'delete result:\n{}\n{}\n'.format(
+                delete_result.status_code, delete_result.text))
+
+        return number_of_tests
+
+    def get_schema(self, confd_host, url, property_=None):
+        """Get schema.
+
+        Args:
+            confd_host (string): Launchpad IP
+            property_ (string): vnfd/nsd/user etc
+        Returns:
+            schema (JSON): Schema in JSON format
+        """
+        headers = {'content-type': 'application/json'}
+
+        result = requests.get(url, auth=HTTPBasicAuth('admin', 'admin'),
+                              headers=headers, verify=False)
+
+        schema = json.loads(result.text)
+
+        return schema
+
+    def traverse_call(
+            self, test_input, data, k_dict, confd_host, logger,
+            number_of_tests, depth, url, state):
+        """Traversing through the values from the test IP JSON.
+
+        Args:
+            test_input (string): the data from the test IP JSON
+            data (json): schema data
+            k_dict (dict): dictionary of the JSON IP
+            confd_host (string): Launchpad IP
+            logger (logger obj): log object
+            number_of_tests (list): test & error cases count
+            depth (int): depth of the json
+            url (string): the url for the post call
+            state: for the tbac token
+        Returns:
+            number_of_tests (list): test & error cases count
+        """
+        for key, kdata_types in k_dict.items():
+            for kdata_type in kdata_types:
+                if kdata_type in test_input:
+                    test_values = test_input[kdata_type]
+                    for test_value in test_values:
+                        test_data = {kdata_type: test_value}
+                        # Actual traversal call which will generate data
+                        json_data = traverse_it(
+                            data, original=False,
+                            test_value=test_data, test_key=key,
+                            max_depth=depth)
+
+                        number_of_tests = self.create_post_call(
+                            json_data, confd_host, url,
+                            logger, state, number_of_tests)
+
+        return number_of_tests
+
+    def test_get_token(
+            self, rw_user_proxy, rbac_user_passwd, user_domain,
+            rbac_platform_proxy, rw_rbac_int_proxy, mgmt_session, state):
+        """Setting the public key in config and get token."""
+        client_id = '1234'
+        rift.auto.mano.create_user(
+            rw_user_proxy, 'test', rbac_user_passwd, user_domain)
+        rift.auto.mano.assign_platform_role_to_user(
+            rbac_platform_proxy, 'rw-rbac-platform:super-admin', 'test',
+            user_domain, rw_rbac_int_proxy)
+        openidc_xpath = (
+            '/rw-openidc-provider:openidc-provider-config/' +
+            'rw-openidc-provider:openidc-client' +
+            '[rw-openidc-provider:client-id={}]'.format(quoted_key(client_id))
+        )
+        config_object = (
+            RwOpenidcProviderYang.
+            YangData_RwOpenidcProvider_OpenidcProviderConfig_OpenidcClient.
+            from_dict({
+                'client_id': client_id,
+                'client_name': 'test',
+                'user_name': 'test',
+                'user_domain': 'tbacdomain',
+                'public_key': PUBLIC_KEY}))
+        rw_open_idc_proxy = mgmt_session.proxy(RwOpenidcProviderYang)
+        rw_open_idc_proxy.create_config(openidc_xpath, config_object)
+
+        # Get the token
+        jwt = Jwt(private_key=PRIVATE_KEY, iss=client_id,
+                  sub="test", aud="https://locahost:8009")
+        jws = jwt.sign_jwt()
+        body_tuple = (
+            ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+            ("assertion", jws),
+        )
+
+        req = tornado.httpclient.HTTPRequest(
+            url=TOKEN_URL,
+            method='POST',
+            headers={"Content-Type": "application/x-www-form-urlencoded"},
+            ca_certs=state.cert,
+            body=urllib.parse.urlencode(body_tuple)
+        )
+        client = tornado.httpclient.HTTPClient()
+        resp = client.fetch(req)
+        token_resp = json.loads(resp.body.decode('utf-8'))
+        assert "access_token" in token_resp
+        state.access_token = token_resp["access_token"]
+
+        auth_value = 'Bearer ' + state.access_token
+        state.session = requests.Session()
+        state.session.headers.update({
+            'content-type': 'application/json',
+            'Authorization': auth_value
+        })
+
+    def test_user_restapi(self, confd_host, logger, state):
+        """Test user creation restapi."""
+        rift_install = os.getenv('RIFT_INSTALL')
+        file_path = (
+            '{}/usr/rift/systemtest/pytest/'.format(rift_install) +
+            'system/ns/restapitest/test_inputs/test_inputs.json')
+        test_input = parse_input_data(file_path)
+        schema_url_for_user = (
+            "https://{}:8008/v2/api/schema/user-config/".format(confd_host)
+        )
+        url_for_user = (
+            "https://{}:8008/v2/api/config/user-config".format(confd_host)
+        )
+        data = self.get_schema(confd_host, schema_url_for_user)
+
+        key_dict = {}
+        k_dict = self.traverse_and_find_all_keys(data, key_dict)
+
+        number_of_tests = [0, 0]  # [total no. of tests, no. of erros]
+        # Traverse with depth but with out any specific key
+        for depth in range(14, 15):
+                number_of_tests = self.traverse_call(
+                    test_input, data["user-config"], k_dict, confd_host,
+                    logger, number_of_tests, depth, url_for_user, state)
+        logger.debug(
+            'No of tests ran for userapi: {}'.format(number_of_tests[0]))
+        logger.debug(
+            'No of crashed tests for userapi:{}'.format(number_of_tests[1]))
+
+    def test_project_restapi(self, confd_host, logger, state):
+        """Test project creation restapi."""
+        rift_install = os.getenv('RIFT_INSTALL')
+        file_path = (
+            '{}/usr/rift/systemtest/pytest/'.format(rift_install) +
+            'system/ns/restapitest/test_inputs/test_inputs.json')
+        test_input = parse_input_data(file_path)
+
+        schema_url_for_project = (
+            "https://{}:8008/v2/api/schema/project/".format(confd_host)
+        )
+        url_for_project = (
+            "https://{}:8008/v2/api/config/project/".format(confd_host)
+        )
+        data = self.get_schema(confd_host, schema_url_for_project)
+
+        key_dict = {}
+        k_dict = self.traverse_and_find_all_keys(data, key_dict)
+
+        number_of_tests = [0, 0]  # [total no. of tests, no. of erros]
+
+        # Traverse with depth but with out any specific key
+        for depth in range(5, 6):
+                number_of_tests = self.traverse_call(
+                    test_input, data["project"], k_dict, confd_host,
+                    logger, number_of_tests, depth, url_for_project, state)
+        logger.debug(
+            'No of tests ran for projectapi: {}'.format(number_of_tests[0]))
+        logger.debug(
+            'No of crashed tests for projectapi:{}'.format(number_of_tests[1]))
diff --git a/rwlaunchpad/ra/pytest/ns/restapitest/utils/__init__.py b/rwlaunchpad/ra/pytest/ns/restapitest/utils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/restapitest/utils/__init__.py
diff --git a/rwlaunchpad/ra/pytest/ns/restapitest/utils/imports.py b/rwlaunchpad/ra/pytest/ns/restapitest/utils/imports.py
new file mode 100644
index 0000000..942c696
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/restapitest/utils/imports.py
@@ -0,0 +1,39 @@
+# !/usr/bin/env python
+"""
+#
+#   Copyright 2017 RIFT.io Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file imports.py
+@author Anoop Valluthadam (anoop.valluthadam@riftio.com)
+"""
+
+import json # noqa
+import socket  # noqa
+import struct # noqa
+import requests # noqa
+import random # noqa
+import logging # noqa
+import uuid # noqa
+import decimal # noqa
+import argparse # noqa
+import datetime # noqa
+import time # noqa
+
+from logging import handlers # noqa
+from signal import SIGTERM # noqa
+from requests.auth import HTTPBasicAuth # noqa
+from random import choice # noqa
+from string import ascii_lowercase # noqa
diff --git a/rwlaunchpad/ra/pytest/ns/restapitest/utils/tbac_token_utils.py b/rwlaunchpad/ra/pytest/ns/restapitest/utils/tbac_token_utils.py
new file mode 100644
index 0000000..2d4fe85
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/restapitest/utils/tbac_token_utils.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python3
+"""
+#
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+"""
+# RIFT_IO_STANDARD_COPYRIGHT_HEADER(BEGIN)
+# Author(s): Balaji Rajappa, Vishnu Narayanan K.A
+# Creation Date: 2017-07-07
+# RIFT_IO_STANDARD_COPYRIGHT_HEADER(END)
+
+import gi
+import json
+import urllib.parse
+
+import rift.auto.mano
+import pytest
+import tornado.httpclient
+import time
+import Cryptodome.PublicKey.RSA as RSA
+
+import oic.utils.jwt as oic_jwt
+import oic.utils.keyio as keyio
+from jwkest.jwk import RSAKey
+from rift.rwlib.util import certs
+gi.require_version('RwOpenidcProviderYang', '1.0')
+gi.require_version('RwRbacInternalYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwKeyspec', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+from gi.repository import ( # noqa
+    RwOpenidcProviderYang,
+    RwProjectNsdYang,
+    RwProjectYang,
+    RwRbacInternalYang,
+    RwConmanYang,
+)
+from gi.repository.RwKeyspec import quoted_key # noqa
+
+
+@pytest.fixture(scope='session')
+def rbac_user_passwd():
+    """A common password being used for all rbac users."""
+    return 'mypasswd'
+
+
+@pytest.fixture(scope='session')
+def user_domain(tbac):
+    """user-domain being used in this rbac test."""
+    if tbac:
+        return 'tbacdomain'
+    return 'system'
+
+
+PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAs9bRFjWofNeWq2qtsvH9iDZXXbv5NQI6avK1hSt+0W0g3SXW
+hllNenZAhFpXHzZvJk2qEoNIRXIeonX4N62FBLD7ZoWHQDGahkyfhxML4jYA3KUa
+PWGeUvMlRPkoR4NjHA3zXQvD2FwTtcKCulGYQHRAAyATIcNq0kKZMuMAJxC5A7VD
+vQVb7vOaN01YxJt+L6KF0v4ZiYdse5yBI/X58i2gnLqy102Oqj2qZygazj5LLdTE
+sjgsiC9ln6kesbRayXiqi+RnF+BeKKlwGCuUpH+vFGxXmT6Kr4iEiGIHxAs/HZOS
+9m61z1eHjrce654mpqwbeqhsyQZswyab2IpERwIDAQABAoIBABrnK+gypr3mUnfa
+QZnfcZoK5w7yq9kuOCb/oAAH/bS+qofqvSjj+x8yyXwDN71Hm2EThTm3wfwBkmuj
+UjqiDqAhCbrQhucnhIJKvCKsyr5QbdiUKlS8Yb7u+MhUrZ3lHdJ4k8t7kxSu0ZQD
+QSM2SZx6x4iwJ6yJW1WQ+PIP21n8ejraQ9PzqpuUsNXh05DU8qN/nJHe311D5ZuB
+UnSHdfGaF+EBbNxPLzV028db+L9m3a+h87uZhyqwRlUXP+swlToVNvF74bs+mflz
+r5JN6CwRM3VamnwmcnE77D/zyCsP1Js9LgoxhzhdcUwIOYVWRzUUVRCsrtYOSGF7
+WBzC3WECgYEA0hGtnBw5rryubv0kWDjZoVGvuwDo7BOW1JFXZYJwvweEj7EjWFTY
+bVk+MYs1huG+0NpNuhw6IYmDPIEkoLVNGuTHBMnA+SzQx/xv719b1OmY0Wl8ikYd
+Xlmhxr7mjAJX4eqkVTrBGtsi6TCLdk3HnUdpXJQ0k2aUN6hNFJfsmhUCgYEA2ykP
+hdVzP1ZtXsHEfHSOfRPIzX9gCLETghntAf44MCF+hHZeEVnuTSrfeqELvy5qCarA
+FgjZ77p7q6R7YP2KBQUc/gzZStjGIOCPv9xI8otXrmQRVXOxWNafeDp+TOPa2o9S
+2bBovNmN4Kc+ayktATCVuabMbuGiMIPuRY1pR+sCgYEAmdJSEw7j+hy1ihYZJ/Sw
+/5xmFoQLCtspRgwLOAx07Jzfp6xpGkQ+mouPrA2oq1TgOeSwp8gFlQsxqvtRy9AW
+XswJI2tsv8jeNKKXgGuOPfCzcxxQEpxW4wC1ImglP35zxbzginxUbIrsHF7ssDsy
+IOvqrdzkRs8FV2AI2TyKByUCgYEAuhdDdwCnu0BH3g3qKUNPOiVyfAuMH9U8G1yo
+Quj6DORj6VYYyeLy1dNxr07QCqX+o/a44/zgEQ7ns/cWTGT8rQaKd62xVDx8/62u
+YdtKlah76zhM/6IdFLIo9o20cNWJH8xTLUT9ql2QexGHjraH4FrAx8M6E2zDqy5b
+Q/OvUcECgYAjt8XosvUiRpZ1ugMxwAx316IIEgs2u7k4xdQESnVhIOM3Ex5ikXkK
+I0Hu/2XPH3KO6+6BOhtdZ4qXLf4hikbIisgq3P87Fb2rUElYZjVm3vClYhEzx6ym
+bSWO/cZTpp9L14qMuWzb13pD20GExPOIBh1m0exvoL3M8JhLShutWw==
+-----END RSA PRIVATE KEY-----"""
+
+PUBLIC_KEY = """-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAs9bRFjWofNeWq2qtsvH9
+iDZXXbv5NQI6avK1hSt+0W0g3SXWhllNenZAhFpXHzZvJk2qEoNIRXIeonX4N62F
+BLD7ZoWHQDGahkyfhxML4jYA3KUaPWGeUvMlRPkoR4NjHA3zXQvD2FwTtcKCulGY
+QHRAAyATIcNq0kKZMuMAJxC5A7VDvQVb7vOaN01YxJt+L6KF0v4ZiYdse5yBI/X5
+8i2gnLqy102Oqj2qZygazj5LLdTEsjgsiC9ln6kesbRayXiqi+RnF+BeKKlwGCuU
+pH+vFGxXmT6Kr4iEiGIHxAs/HZOS9m61z1eHjrce654mpqwbeqhsyQZswyab2IpE
+RwIDAQAB
+-----END PUBLIC KEY-----"""
+
+WRONG_PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEA230Ic8gqYGrIYPffrgvS9ezrI94+TMwIX0A3nyi6nRBOAzuV
+OMP0L4OegDLnAkyUC4ZiH6B9uAJ1mbp4WsX0Q2a3FuGzscCfriV0JKRd4256Mj60
+bGq7xLqR/d62IzLrQ2eJCQe2IspwUIeAW301igwoPIGTfZurQ6drXBcbRVo7adry
+V3+TGsfQVge95IyVAPm4A7kcJsdQu9HsD7Hp9LIM35B3oHCOF7hHP/MEEAz84Q6q
+lpWxdTzSnIxDXWxS2BqPInKOIL5egpn69AfJKLj+QPpQymULx3FCeNKeHmSICHtP
+r0uTckEek0kfFT2W6hIU1w1f+Pkddhc1fY45VQIDAQABAoIBABvOsHZywqOqg659
+WPJk/xo3JOdLbdsu8lSW/zUD5PinKysPrm0drl8irr8RM+E/sHXxVZcqLyNT9HBA
+hqUBdVvgtIuKlsiLXe+jQR6vUFHTGlopRZSCxT08YeinAa5d8h59DIh/WJz5xtb9
+A88Tguf1eFeKFxSP11ff6yMkrkjP1KmvNRoTAC0MU3p/N6UT03roR9v6n4qGPF6p
+/fy6uhLWSJVl7IGFL18DEODid64ShK37VytnvLAMQp8OzL87OdoUW6qrA+z4FP59
+XSpXULxn6ayJG3VChT+Y+nb23rC6gzCYYb3qkSwep2xNqfblP8jL2k/NSlbshdiz
+j3BfK8ECgYEA6D7SMCXZ2hBYu8EBoGRmMLdtM+spps61JOAhgy2i9aNQ/YlKfuS9
+kvNFqT1DEpQsjcRmZIEVb5uJQJYUDx6zj4eUSzkISvziz43dg4RKpC/ktprp9RQ1
+8sAQD4n5Xy2chdTQHKfGl4oF5b16wpi0eE97XptDOlLgPhk167woUQUCgYEA8fAt
+8uZxw0aKkQbF+tYItsWQQP87dJGUeLna4F3T6q5L5WJYCtFqILiFfWwfcjEaOKWV
+JzKr0f9pLrRxXYdFUxNolOhA1hZCqZu2ZzpSlfsPWhp2WflGi6DqzSByhgVuwHbV
+pRl0TRE2dQVgpuXxxiURREHoHJPZRc+3sOwU+BECgYAZJXQssmx8J/jzm1pJu5U1
+ASdZz8Sawxbp/zqhsXdLkXtbeFoQk0PTfXO1d2Sjxldsoi9UAoYHp5ec3qMdX/2h
+NNThsDMtq2QDhSDO9KwASw9AllVuq9mLhzA1/oJ5w76G3xwJfkEKd29cCMAaAd7I
+iBKbk8QbtI2DK8ei1qSm4QKBgAPHvPAOqbhjYcbiVDWXIou4ioh5dHRd0fQQ81qO
+HMGN96Gd58JDg2T/fRZ4mgUuvzojXDFAmW6ujvYr25mag3rI0tmAx4KQ1nnP9Qmn
+36J4ScUepLrDKlcELKcH2sI9U32uXag2vZp2qmMpsljpPt3ZtmtanEXWCY8Nr9ET
+30ABAoGAQ63wGwq1LPS6t/zU6CwOlIzGNnHDquO7o1o/h8IPt3BN6yF0NEVItjdi
+fL2ZwmBCUbO6Y/Jb1kh4a0iohWF33nS3J4Q6wSQUfBMG5jDI7GfuKAgTQl+sMkOM
+xjyKrWs/y7HtiP/2vf83QVEL8Bxr3WXdXHj1EBHFEMWA576J6mk=
+-----END RSA PRIVATE KEY-----"""
+
+roles = (
+    'rw-rbac-platform:super-admin', 'rw-project:project-admin',
+    'rw-project-mano:catalog-admin', 'rw-project:project-oper'
+)
+
+
+class Jwt:
+    """Jwt."""
+
+    def __init__(
+            self, private_key=None, public_key=None,
+            iss=None, sub=None, aud=None):
+        """__init___."""
+        self.iss = iss
+        self.sub = sub
+        self.aud = aud
+        self.keyjar = keyio.KeyJar()
+        if private_key:
+            self._add_key_to_keyjar(private_key)
+        if public_key:
+            self._add_key_to_keyjar(public_key, owner=self.iss)
+
+    def _add_key_to_keyjar(self, pkey, owner=''):
+        kb = keyio.KeyBundle()
+        priv_key = RSA.importKey(pkey)
+        key = RSAKey().load_key(priv_key)
+        key.use = "sig"
+        kb.append(key)
+        self.keyjar.add_kb(owner, kb)
+
+    def sign_jwt(self):
+        """sign_jwt."""
+        jwt = oic_jwt.JWT(self.keyjar, iss=self.iss)
+        jws = jwt.pack(sub=self.sub, aud=self.aud)
+        return jws
+
+    def verify(self, jws):
+        """verify."""
+        jwt = oic_jwt.JWT(self.keyjar)
+        return jwt.unpack(jws)
+
+TOKEN_URL = "https://localhost:8009/token"
+REVOKE_URL = "https://localhost:8009/revoke"
+REST_GET_LOG_CONFIG = "https://localhost:8008/api/running/logging"
+
+
+class State:
+    """State."""
+
+    def __init__(self):
+        """__init___."""
+        self.access_token = None
+        _, self.cert, _ = certs.get_bootstrap_cert_and_key()
+
+    def teardown(self):
+        """teardown."""
+        print("\n=== Done with Tests ===")
+
+
+@pytest.fixture(scope="session")
+def state():
+    """state."""
+    st = State()
+    yield st
+    st.teardown()
\ No newline at end of file
diff --git a/rwlaunchpad/ra/pytest/ns/restapitest/utils/traversal_engine.py b/rwlaunchpad/ra/pytest/ns/restapitest/utils/traversal_engine.py
new file mode 100644
index 0000000..f476ed8
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/restapitest/utils/traversal_engine.py
@@ -0,0 +1,262 @@
+
+from .imports import * # noqa
+
+
+def populate_data(data_type, original=True, test_value={}, keys={}):
+    """Generate data from schema depends its Data-type
+    Args:
+        data_type (string): data_type from the test IP json
+        original (boolean): if it is True,
+                            will generate normal JSON with randon
+                            values
+        test_value (dict): will be like this {'string': '-1'}, means, if
+                           string data typr comes, data will be -1
+        keys (dict): if this is present, value testing for this particular
+                     key
+    Returns:
+        string_ (string): string value
+    """
+
+    if original:
+        if (isinstance(data_type, dict)):
+            if 'enumeration' in data_type:
+                string_ = list(data_type['enumeration']['enum'])[0]
+                return string_
+            if 'leafref' in data_type:
+                data_type = 'leafref'
+            if 'union' in data_type:
+                data_type = 'union'
+
+        if data_type == 'string':
+            string_ = ''.join(choice(ascii_lowercase) for i in range(12))
+        elif data_type == 'uint64':
+            string_ = uuid.uuid4().int & (1 << 64) - 1
+        elif data_type == 'uint8':
+            string_ = uuid.uuid4().int & (1 << 8) - 1
+        elif data_type == 'uint32':
+            string_ = uuid.uuid4().int & (1 << 32) - 1
+        elif data_type == 'uint16':
+            string_ = uuid.uuid4().int & (1 << 16) - 1
+        elif data_type == 'decimal64':
+            string_ = float(decimal.Decimal('%d.%d'
+                                            % (random.randint(0, 2134342),
+                                               random.randint(0, 999))))
+        elif data_type == 'int64':
+            string_ = random.randint(0, 1000000000000)
+        elif data_type == 'int32':
+            string_ = random.randint(0, 1000000000)
+        elif data_type == 'int16':
+            string_ = random.randint(0, 10000)
+        elif data_type == 'leafref':
+            string_ = 'leafref_data-type'
+        elif data_type == 'union':
+            string_ = socket.inet_ntoa(
+                struct.pack('>I', random.randint(1, 0xffffffff)))
+        elif data_type == 'boolean':
+            string_ = True
+        else:
+            string_ = data_type
+
+        return string_
+    else:
+        if (isinstance(data_type, dict)):
+            if 'enumeration' in data_type:
+                string_ = list(data_type['enumeration']['enum'])[0]
+                return string_
+            if 'leafref' in data_type:
+                data_type = 'leafref'
+            if 'union' in data_type:
+                data_type = 'union'
+
+        # print(data_type, test_value)
+        if not (isinstance(data_type, dict)):
+            if keys and keys[list(keys)[0]]:
+                if list(keys.values())[0] in keys:
+                    if data_type in test_value:
+                        return test_value[data_type]
+            else:
+                if data_type in test_value:
+                    return test_value[data_type]
+
+        if data_type == 'string':
+            string_ = ''.join(choice(ascii_lowercase) for i in range(12))
+        elif data_type == 'uint64':
+            string_ = uuid.uuid4().int & (1 << 64) - 1
+        elif data_type == 'uint8':
+            string_ = uuid.uuid4().int & (1 << 8) - 1
+        elif data_type == 'uint32':
+            string_ = uuid.uuid4().int & (1 << 32) - 1
+        elif data_type == 'uint16':
+            string_ = uuid.uuid4().int & (1 << 16) - 1
+        elif data_type == 'decimal64':
+            string_ = float(decimal.Decimal('%d.%d'
+                                            % (random.randint(0, 99999999),
+                                               random.randint(0, 999))))
+        elif data_type == 'int64':
+            string_ = random.randint(0, 99999999)
+        elif data_type == 'int32':
+            string_ = random.randint(0, 999999)
+        elif data_type == 'int16':
+            string_ = random.randint(0, 999999)
+        elif data_type == 'leafref':
+            string_ = 'leafref_data-type'
+        elif data_type == 'union':
+            string_ = socket.inet_ntoa(
+                struct.pack('>I', random.randint(1, 0xffffffff)))
+        elif data_type == 'boolean':
+            string_ = True
+        else:
+            string_ = data_type
+
+        return string_
+
+
+def traverse_it(it, path='', data_json={}, original=True, test_value={},
+                test_key=None, avoid=[], depth=0, max_depth=0):
+    """Main recursicve traversel method, which will go through the schema
+    and generate data JSON
+
+    Args:
+        it (json): schema
+        data_json (dict): used to generate the data for particular key which is
+                          present in this dict
+        original (boolean): used to generate original(complete) data JSON
+        test_value (dict): data type and the corresponding value which is
+                           getting replaced generated
+        test_key (string): the key which is gonna get tested
+        avoid (list): these keys will get avoided while JSON is getting
+                      created
+        depth (int): depth of the JSON
+        max_depth (int: will be the max depth of the JSON)
+
+    Returns:
+        Json data
+    """
+
+    if (isinstance(it, list)):
+        temp = {}
+        depth += 1
+        if depth == max_depth:
+            return []
+        for item in it:
+            # print(path)
+
+            x = traverse_it(item, path=path, data_json=data_json,
+                            original=original,
+                            test_value=test_value, test_key=test_key,
+                            avoid=avoid,
+                            depth=depth,
+                            max_depth=max_depth)
+            temp.update(x)
+        return temp
+    elif (isinstance(it, dict)):
+        if 'name' in it.keys():
+            if it['name'] == 'disabled':
+                temp = [{it['name']: ''}, {}]
+                return random.choice(temp)
+            path = path + '/' + it['name']
+        if 'type' in it.keys():
+
+            if it['type'] == 'container':
+                depth += 1
+                if depth == max_depth:
+                    return {}
+                data_json = {
+                    it['name']: traverse_it(it['properties'],
+                                            path=path, data_json=data_json,
+                                            original=original,
+                                            test_value=test_value,
+                                            test_key=test_key,
+                                            avoid=avoid,
+                                            depth=depth,
+                                            max_depth=max_depth)
+                }
+                return data_json
+            elif it['type'] == 'list':
+                for item_check in it['properties']:
+
+                    if 'data-type' in item_check:
+                        if (isinstance(item_check['data-type'], dict)):
+                            if 'leafref' in item_check['data-type']:
+                                temp = {it['name']: []}
+                                return temp
+                depth += 1
+
+                if depth == max_depth:
+                    return {}
+
+                temp = {
+                    it['name']:
+                    [traverse_it(it['properties'], path=path,
+                                 data_json=data_json,
+                                 original=original,
+                                 test_value=test_value, test_key=test_key,
+                                 avoid=avoid,
+                                 depth=depth,
+                                 max_depth=max_depth)]
+                }
+                return temp
+            elif it['type'] == 'case':
+                for item_check in it['properties']:
+                    if 'data-type' in item_check:
+                        if (isinstance(item_check['data-type'], dict)):
+                            if 'leafref' in item_check['data-type']:
+                                return {}
+                depth += 1
+                if depth == max_depth:
+                    return {}
+
+                return traverse_it(it['properties'][0], path=path,
+                                   data_json=data_json,
+                                   original=original,
+                                   test_value=test_value, test_key=test_key,
+                                   avoid=avoid,
+                                   depth=depth,
+                                   max_depth=max_depth)
+            elif it['type'] == 'choice':
+                depth += 1
+
+                if depth == max_depth:
+                    return {}
+
+                return traverse_it(it['properties'][0], path=path,
+                                   data_json=data_json,
+                                   original=original,
+                                   test_value=test_value, test_key=test_key,
+                                   avoid=avoid,
+                                   depth=depth,
+                                   max_depth=max_depth)
+            elif it['type'] == 'leaf':
+                # print(data_json)
+                if it['name'] in avoid:
+                    return {}
+                if 'data-type' in it:
+                    if 'subnet-address' == it['name']:
+                        data = '255.255.255.0/24'
+                    elif 'numa-unaware' == it['name']:
+                        data = ''
+                    elif 'ephemeral' == it['name']:
+                        data = ''
+                    else:
+                        data = populate_data(it['data-type'],
+                                             original=original,
+                                             test_value=test_value,
+                                             keys={it['name']: test_key})
+                return {it['name']: data}
+            else:
+                if 'subnet-address' == it['name']:
+                    data = '255.255.255.0/24'
+                elif 'numa-unaware' == it['name']:
+                    data = ''
+                elif 'ephemeral' == it['name']:
+                    data = ''
+                else:
+                    data = populate_data(it['data-type'],
+                                         original=original,
+                                         test_value=test_value,
+                                         keys={it['name']: test_key})
+            return {it['name']: data}
+
+        else:
+            print('Error in the JSON!')
+            exit(1)
diff --git a/rwlaunchpad/ra/pytest/ns/restapitest/utils/utils.py b/rwlaunchpad/ra/pytest/ns/restapitest/utils/utils.py
new file mode 100644
index 0000000..c664572
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/restapitest/utils/utils.py
@@ -0,0 +1,28 @@
+
+from .imports import * # noqa
+
+
+def parse_cli():
+    """Parse command line options
+    """
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--confd-host", help="confd IP",
+                        dest='confd_host',
+                        default='127.0.0.1')
+    args = parser.parse_args()
+
+    return args
+
+
+def parse_input_data(file_name):
+    """
+    open the input file and make into a python Dict Obj
+    """
+
+    data = ''
+
+    with open(file_name, 'r') as ipf:
+        data = json.load(ipf)
+
+    return data
diff --git a/rwlaunchpad/ra/pytest/ns/test_multiple_ns_instantiation.py b/rwlaunchpad/ra/pytest/ns/test_multiple_ns_instantiation.py
new file mode 100644
index 0000000..ed1da4c
--- /dev/null
+++ b/rwlaunchpad/ra/pytest/ns/test_multiple_ns_instantiation.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+#
+#   Copyright 2016-2017 RIFT.io Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import gi
+import numpy as np
+import os
+import pytest
+import random
+import time
+
+import rift.auto.descriptor
+from rift.auto.os_utils import get_mem_usage, print_mem_usage
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
+from gi.repository import (
+    RwNsrYang,
+    RwVnfrYang,
+    RwVlrYang,
+    RwProjectNsdYang,
+    RwProjectVnfdYang,
+    )
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+@pytest.fixture(scope='module')
+def rwvnfr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfrYang)
+
+
+@pytest.fixture(scope='module')
+def rwvlr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVlrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwProjectNsdYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwProjectVnfdYang)
+
+
+@pytest.mark.setup('multiple_ns_setup')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultipleNsSetup(object):
+    def test_onboard_descriptors(self, logger, mgmt_session, descriptors, nsd_proxy, vnfd_proxy):
+        """Onboards the VNF, NS packages required for the test"""
+        vnfds, nsds = [], []
+        for descriptor in descriptors:
+            pkg_type = rift.auto.descriptor.get_package_type(descriptor)
+            if pkg_type == 'NSD':
+                nsds.append(descriptor)
+            elif pkg_type == 'VNFD':
+                vnfds.append(descriptor)
+
+        pkgs_in_upload_seq = vnfds + nsds
+        logger.debug('Packages in sequence of upload: {}'.format([os.path.basename(pkg) for pkg in pkgs_in_upload_seq]))
+
+        for pkg in pkgs_in_upload_seq:
+            logger.debug('Uploading package {}'.format(pkg))
+            rift.auto.descriptor.onboard(mgmt_session, pkg) # Raise exception if the upload is not successful
+
+        # Verify if the packages are uploaded
+        assert len(vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog').vnfd) == len(vnfds)
+        assert len(nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog').nsd) == len(nsds)
+
+
+@pytest.mark.depends('multiple_ns_setup')
+@pytest.mark.incremental
+class TestMultipleNsInstantiate(object):
+    def test_instantiate_ns_mem_check(self, logger, rwvnfr_proxy, nsd_proxy,
+                                      rwnsr_proxy, rwvlr_proxy,
+                                      cloud_account_name, descriptors):
+        """It runs over a loop. In each loop, it instantiates a NS,
+        terminates the NS, checks memory usage of the system.
+        During memory check, it verifies whether current system
+        mem usage exceeds base memory-usage by a defined threshold.
+        """
+        catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
+
+        # Random NSD sequence generation for NS instantiation
+        iteration, no_of_hours = map(float, pytest.config.getoption('--multiple-ns-instantiate').split(','))
+        nsd_count = len([pkg for pkg in descriptors if 'nsd.' in pkg])
+        nsd_instantiate_seq = np.random.choice(list(range(nsd_count)), int(iteration))
+        random.shuffle(nsd_instantiate_seq)
+
+        logger.debug('nsd instantiaion sequence: {}'.format([catalog.nsd[seq].name for seq in nsd_instantiate_seq]))
+
+        # Collect mem-usage of the system
+        base_system_rss = get_mem_usage()
+        print_mem_usage()
+
+        start_time = time.time()
+        total_duration_in_secs = no_of_hours * 60 * 60
+        # Loop through NSD instantiation sequence and instantiate the NS
+        for idx, seq in enumerate(nsd_instantiate_seq, 1):
+            # Instantiating NS
+            nsd = catalog.nsd[seq]
+            logger.debug('Iteration {}: Instantiating NS {}'.format(idx, nsd.name))
+
+            nsr = rift.auto.descriptor.create_nsr(cloud_account_name, nsd.name, nsd)
+            rwnsr_proxy.create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
+
+            # Verify if NS reaches active state
+            nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]'.format(quoted_key(nsr.id)))
+            assert nsr_opdata is not None
+
+            # Verify NSR instances enter 'running' operational-status
+            for nsr in rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata').nsr:
+                xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(
+                                                quoted_key(nsr.ns_instance_config_ref))
+                rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=400)
+
+            # Verify NSR instances enter 'configured' config-status
+            for nsr in rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata').nsr:
+                xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(quoted_key(nsr.ns_instance_config_ref))
+                rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=400)
+
+            time.sleep(30)  # Let it run for few secs before terminating it
+
+            # Terminates the NSR
+            rift.auto.descriptor.terminate_nsr(rwvnfr_proxy, rwnsr_proxy,
+                                               rwvlr_proxy, logger)
+
+            time.sleep(30)  # After NS termination, wait for few secs before collecting mem-usage
+
+            # Get the mem-usage and compare it with base mem-usage
+            print_mem_usage()
+            curr_system_rss = get_mem_usage()
+            threshold = 5
+            mem_usage_inc = 100 * (curr_system_rss - base_system_rss) / base_system_rss
+            if mem_usage_inc > threshold:
+                assert False, 'There is an increase of {}%% during sequence {}. Base system-rss- {}; Current system-rss- {}'.format(
+                    mem_usage_inc, idx, base_system_rss, curr_system_rss)
+
+            if (time.time() - start_time) > total_duration_in_secs:
+                logger.debug('NS instantiation has been happening for last {} hours (provided limit). Exiting.'.format(
+                    no_of_hours))
+                break
+
+
+@pytest.mark.depends('multiple_ns_setup')
+@pytest.mark.teardown('multiple_ns_setup')
+@pytest.mark.incremental
+class TestMultipleNsTeardown(object):
+    def test_delete_descritors(self, nsd_proxy, vnfd_proxy):
+        """Deletes VNF, NS descriptors"""
+        nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(nsd.id))
+            nsd_proxy.delete_config(xpath)
+
+        nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
+            vnfd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/ns/test_onboard.py b/rwlaunchpad/ra/pytest/ns/test_onboard.py
index 5951ce8..83f74bf 100644
--- a/rwlaunchpad/ra/pytest/ns/test_onboard.py
+++ b/rwlaunchpad/ra/pytest/ns/test_onboard.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """
-# 
-#   Copyright 2016 RIFT.IO Inc
+#
+#   Copyright 2016-2017 RIFT.io Inc
 #
 #   Licensed under the Apache License, Version 2.0 (the "License");
 #   you may not use this file except in compliance with the License.
@@ -21,12 +21,16 @@
 @brief Onboard descriptors
 """
 
+import gi
 import json
 import logging
+import numpy as np
 import os
 import pytest
-import shlex
+import random
 import requests
+import requests_toolbelt
+import shlex
 import shutil
 import subprocess
 import time
@@ -34,32 +38,42 @@
 
 import rift.auto.mano
 import rift.auto.session
+import rift.auto.descriptor
 
-import gi
 gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
 gi.require_version('RwLaunchpadYang', '1.0')
 gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwStagingMgmtYang', '1.0')
+gi.require_version('RwPkgMgmtYang', '1.0')
+gi.require_version('RwVlrYang', '1.0')
 
 from gi.repository import (
     RwcalYang,
-    NsdYang,
+    RwProjectNsdYang,
     RwNsrYang,
     RwVnfrYang,
     NsrYang,
     VnfrYang,
     VldYang,
-    RwVnfdYang,
+    RwProjectVnfdYang,
     RwLaunchpadYang,
-    RwBaseYang
+    RwBaseYang,
+    RwStagingMgmtYang,
+    RwPkgMgmtYang,
+    RwImageMgmtYang,
+    RwTypes,
+    RwVlrYang
 )
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
 
 logging.basicConfig(level=logging.DEBUG)
 
 
 @pytest.fixture(scope='module')
 def vnfd_proxy(request, mgmt_session):
-    return mgmt_session.proxy(RwVnfdYang)
+    return mgmt_session.proxy(RwProjectVnfdYang)
 
 @pytest.fixture(scope='module')
 def rwvnfr_proxy(request, mgmt_session):
@@ -71,8 +85,13 @@
 
 
 @pytest.fixture(scope='module')
+def rwvlr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVlrYang)
+
+
+@pytest.fixture(scope='module')
 def nsd_proxy(request, mgmt_session):
-    return mgmt_session.proxy(NsdYang)
+    return mgmt_session.proxy(RwProjectNsdYang)
 
 
 @pytest.fixture(scope='module')
@@ -88,30 +107,6 @@
 def endpoint():
     return "upload"
 
-def create_nsr(nsd, input_param_list, cloud_account_name):
-    """
-    Create the NSR record object
-
-    Arguments:
-         nsd              -  NSD
-         input_param_list - list of input-parameter objects
-
-    Return:
-         NSR object
-    """
-    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
-
-    nsr.id = str(uuid.uuid4())
-    nsr.name = rift.auto.mano.resource_name(nsr.id)
-    nsr.short_name = "nsr_short_name"
-    nsr.description = "This is a description"
-    nsr.nsd.from_dict(nsd.as_dict())
-    nsr.admin_status = "ENABLED"
-    nsr.input_parameter.extend(input_param_list)
-    nsr.cloud_account = cloud_account_name
-
-    return nsr
-
 
 def upload_descriptor(
         logger,
@@ -200,47 +195,25 @@
         host=host,
         endpoint=endpoint)
 
-def terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger, wait_after_kill=True):
-    """
-    Terminate the instance and check if the record is deleted.
 
-    Asserts:
-    1. NSR record is deleted from instance-config.
+def get_ns_cloud_resources(rwvnfr_proxy, rwvlr_proxy):
+    """Returns a collection of ports, networks, VMs used by this NS"""
+    ns_cloud_resources = {'ports':[], 'vms':[], 'networks':[]}
 
-    """
-    logger.debug("Terminating NSRs")
+    # Get ports and VMs associated with each VNF
+    vnfrs = rwvnfr_proxy.get('/rw-project:project[rw-project:name="default"]/vnfr-catalog/vnfr', list_obj=True)
+    for vnfr in vnfrs.vnfr:
+        for cp in vnfr.connection_point:
+            ns_cloud_resources['ports'].append(cp.connection_point_id)
+        for vdur in vnfr.vdur:
+            ns_cloud_resources['vms'].append(vdur.vim_id)
 
-    nsr_path = "/ns-instance-config"
-    nsr = rwnsr_proxy.get_config(nsr_path)
-    nsrs = nsr.nsr
+    # Get the network associated with each NS
+    vlrs = rwvlr_proxy.get('/rw-project:project[rw-project:name="default"]/vlr-catalog/vlr', list_obj=True)
+    for vlr in vlrs.vlr:
+        ns_cloud_resources['networks'].append(vlr.network_id)
 
-    xpaths = []
-    for nsr in nsrs:
-        xpath = "/ns-instance-config/nsr[id='{}']".format(nsr.id)
-        rwnsr_proxy.delete_config(xpath)
-        xpaths.append(xpath)
-
-    if wait_after_kill:
-        time.sleep(30)
-    else:
-        time.sleep(5)
-
-    for xpath in xpaths:
-        nsr = rwnsr_proxy.get_config(xpath)
-        assert nsr is None
-
-    # Get the ns-instance-config
-    ns_instance_config = rwnsr_proxy.get_config("/ns-instance-config")
-
-    # Termination tests
-    vnfr = "/vnfr-catalog/vnfr"
-    vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
-    assert vnfrs is None or len(vnfrs.vnfr) == 0
-
-    # nsr = "/ns-instance-opdata/nsr"
-    # nsrs = rwnsr_proxy.get(nsr, list_obj=True)
-    # assert len(nsrs.nsr) == 0
-
+    return ns_cloud_resources
 
 
 @pytest.mark.setup('nsr')
@@ -249,7 +222,7 @@
 class TestNsrStart(object):
     """A brief overview of the steps performed.
     1. Generate & on-board new descriptors
-    2. Start the NSR 
+    2. Start the NSR
     """
 
     def test_upload_descriptors(
@@ -260,32 +233,168 @@
             mgmt_session,
             scheme,
             cert,
-            descriptors
+            descriptors,
+            iteration,
         ):
         """Generates & On-boards the descriptors.
+
+        1. Request a staging area: RPC returns an endpoint and port
+        1. Upload the file to the endpoint, return the endpoint to download
+        2. Reconstruct the URL and trigger an RPC upload for the package.
         """
+        # We are instantiating the NS twice in port-sequencing test. Seconds NS instantiation will be using already uploaded
+        # descriptors with updated interface positional values.
+        if iteration==1 and pytest.config.getoption("--port-sequencing"):
+            pytest.skip()
         endpoint = "upload"
 
         for file_name in descriptors:
-            onboard_descriptor(
-                    mgmt_session.host,
-                    file_name,
-                    logger,
-                    endpoint,
-                    scheme,
-                    cert)
+
+            ip = RwStagingMgmtYang.YangInput_RwStagingMgmt_CreateStagingArea.from_dict({
+                    "package_type": "VNFD"})
+
+            if "nsd" in file_name:
+                ip.package_type = "NSD"
+
+            data = mgmt_session.proxy(RwStagingMgmtYang).rpc(ip)
+            form = requests_toolbelt.MultipartEncoder(fields={
+                        'file': (os.path.basename(file_name),
+                                 open(file_name, 'rb'),
+                                 'application/octet-stream')
+                        })
+
+            response = requests.post(
+                    "{}://{}:{}/{}".format(
+                            scheme,
+                            mgmt_session.host,
+                            data.port,
+                            data.endpoint),
+                    data=form.to_string(),
+                    cert=cert,  # cert is a tuple
+                    verify=False,
+                    headers={"Content-Type": "multipart/form-data"})
+
+            resp = json.loads(response.text)
+            url = "https://{}:{}{}".format(mgmt_session.host, data.port, resp['path'])
+
+            ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_PackageCreate.from_dict({
+                    "package_type": "VNFD",
+                    "external_url": url
+                })
+
+            if "nsd" in file_name:
+                ip.package_type = "NSD"
+
+            # trigger the upload.
+            resp = mgmt_session.proxy(RwPkgMgmtYang).rpc(ip)
+
+            wait_onboard_transaction_finished(
+                logger,
+                resp.transaction_id,
+                scheme,
+                cert,
+                host=mgmt_session.host,
+                endpoint=endpoint)
 
         descriptor_vnfds, descriptor_nsd = descriptors[:-1], descriptors[-1]
 
-        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
         actual_vnfds = catalog.vnfd
         assert len(actual_vnfds) == len(descriptor_vnfds), \
                 "There should {} vnfds".format(len(descriptor_vnfds))
 
-        catalog = nsd_proxy.get_config('/nsd-catalog')
+        catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
         actual_nsds = catalog.nsd
         assert len(actual_nsds) == 1, "There should only be a single nsd"
 
+    @pytest.mark.skipif(not pytest.config.getoption('--upload-images-multiple-accounts'),
+                        reason="need --upload-images-multiple-accounts option to run")
+    def test_images_uploaded_multiple_accounts(self, logger, mgmt_session, random_image_name, cloud_accounts, cal):
+        image_mgmt_proxy = mgmt_session.proxy(RwImageMgmtYang)
+        upload_jobs = image_mgmt_proxy.get('/rw-project:project[rw-project:name="default"]/upload-jobs')
+        logger.info('Embedded image name(apart from ping pong Fedora images): {}'.format(random_image_name))
+        for job in upload_jobs.job:
+            assert image_mgmt_proxy.wait_for('/rw-project:project[rw-project:name="default"]/upload-jobs/job[id={}]/status'.format(quoted_key(job.id)), 'COMPLETED', timeout=240)
+            assert len(job.upload_tasks) == len(cloud_accounts)
+            for upload_task in job.upload_tasks:
+                assert upload_task.status == 'COMPLETED'
+
+        assert len(upload_jobs.job) == 3
+
+        # Check whether images are present in VIMs
+        for account in cloud_accounts:
+            rc, res = cal.get_image_list(RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict(account.as_dict()))
+            assert rc == RwTypes.RwStatus.SUCCESS
+            assert [image for image in res.imageinfo_list if image.name == random_image_name]
+
+    @pytest.mark.skipif(not pytest.config.getoption("--vnf-onboard-delete"), reason="need --vnf-onboard-delete option to run")
+    def test_upload_delete_descriptors(self, logger, mgmt_session, vnfd_proxy, descriptors, vnf_onboard_delete):
+        """Randomly upload and delete VNFs. With each upload/delete, verify if the VNF
+        gets uploaded/deleted successfully.
+        """
+        xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]"
+        iteration, vnf_count = map(int, vnf_onboard_delete.split(','))
+
+        # Get the VNF paths to be used for onboarding
+        all_vnfs = [pkg_path for pkg_path in descriptors if '_nsd' not in os.path.basename(pkg_path)]
+        if vnf_count > len(all_vnfs):
+            vnf_count = len(all_vnfs)
+        available_vnfs = random.sample(all_vnfs, vnf_count)
+
+        # Get the add, delete iterations
+        add_del_seq = list(np.random.choice(['add', 'del'], iteration))
+        random.shuffle(add_del_seq)
+        logger.info('Vnf add-delete iteration sequence: {}'.format(add_del_seq))
+
+        uploaded_vnfs = {}
+
+        def get_vnfd_list():
+            """Returns list of VNFDs"""
+            vnfd_obj = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
+            return vnfd_obj.vnfd if vnfd_obj else []
+
+        def delete_vnfd():
+            """Deletes a VNFD"""
+            vnf_path, vnfd_id = random.choice(list(uploaded_vnfs.items()))
+            logger.info('Deleting VNF {} having id {}'.format(os.path.basename(vnf_path), vnfd_id))
+            vnfd_proxy.delete_config(xpath.format(quoted_key(vnfd_id)))
+            uploaded_vnfs.pop(vnf_path)
+            available_vnfs.append(vnf_path)
+            assert not [vnfd for vnfd in get_vnfd_list() if vnfd.id == vnfd_id]
+
+        for op_type in add_del_seq:
+            if op_type =='del':
+                if uploaded_vnfs:
+                    delete_vnfd()
+                    continue
+                op_type = 'add'
+
+            if op_type == 'add':
+                if not available_vnfs:
+                    delete_vnfd()
+                    continue
+                vnf_path = random.choice(available_vnfs)
+                logger.info('Adding VNF {}'.format(os.path.basename(vnf_path)))
+                rift.auto.descriptor.onboard(mgmt_session, vnf_path)
+                vnfs = get_vnfd_list()
+                assert len(vnfs) == len(uploaded_vnfs) + 1
+                vnfd = [vnfd for vnfd in vnfs if vnfd.id not in list(uploaded_vnfs.values())]
+                assert len(vnfd) == 1
+                vnfd = vnfd[0]
+                assert vnfd.name
+                assert vnfd.connection_point
+                assert vnfd.vdu
+                uploaded_vnfs[vnf_path] = vnfd.id
+                available_vnfs.remove(vnf_path)
+
+            assert len(get_vnfd_list()) == len(uploaded_vnfs)
+            logger.info('Onboarded VNFs : {}'.format(uploaded_vnfs))
+
+        assert len(available_vnfs) + len(uploaded_vnfs) == vnf_count
+        # cleanup - Delete VNFs(if any)
+        for vnfd_id in uploaded_vnfs.values():
+            vnfd_proxy.delete_config(xpath.format(quoted_key(vnfd_id)))
+
     @pytest.mark.feature("upload-image")
     def test_upload_images(self, descriptor_images, cloud_host, cloud_user, cloud_tenants):
 
@@ -295,7 +404,7 @@
                 [(tenant, "private") for tenant in cloud_tenants])
 
         for image_location in descriptor_images:
-            image = RwcalYang.ImageInfoItem.from_dict({
+            image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList.from_dict({
                     'name': os.path.basename(image_location),
                     'location': image_location,
                     'disk_format': 'qcow2',
@@ -304,14 +413,85 @@
 
 
     def test_set_scaling_params(self, nsd_proxy):
-        nsds = nsd_proxy.get('/nsd-catalog')
+        nsds = nsd_proxy.get('/rw-project:project[rw-project:name="default"]/nsd-catalog')
         nsd = nsds.nsd[0]
         for scaling_group in nsd.scaling_group_descriptor:
             scaling_group.max_instance_count = 2
 
-        nsd_proxy.replace_config('/nsd-catalog/nsd[id="{}"]'.format(
-            nsd.id), nsd)
+        nsd_proxy.replace_config('/rw-project:project[rw-project:name="default"]/nsd-catalog/nsd[id={}]'.format(
+            quoted_key(nsd.id)), nsd)
 
+    @pytest.mark.skipif(not (pytest.config.getoption("--update-vnfd-instantiate") or pytest.config.getoption("--port-sequencing")),
+                        reason="need --update-vnfd-instantiate or --port-sequencing option to run")
+    def test_update_vnfd(self, vnfd_proxy, iteration, port_sequencing_intf_positions):
+        """Updates few fields of ping pong VNFDs and verify those changes
+        """
+        xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]"
+        vnfd_catalog = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd"
+
+        if iteration==0 and pytest.config.getoption("--port-sequencing"):
+            pytest.skip()
+
+        def get_vnfd():
+            vnfds = vnfd_proxy.get(vnfd_catalog, list_obj=True)
+            dict_ = {}
+
+            # Get ping pong VNFDs
+            for vnfd in vnfds.vnfd:
+                if 'ping' in vnfd.name:
+                    dict_['ping'] = vnfd
+                if 'pong' in vnfd.name:
+                    dict_['pong'] = vnfd
+            return dict_
+
+        vnfds_dict = get_vnfd()
+        update_data = {'ping':{'static_ip_address':'31.31.31.60'}, 'pong':{'static_ip_address':'31.31.31.90'}}
+        port_sequencing_intf_positions_tmp = port_sequencing_intf_positions[:]
+
+        # Modify/add fields in VNFDs
+        for name_, vnfd in vnfds_dict.items():
+            if pytest.config.getoption('--update-vnfd-instantiate'):
+                vnfd.vdu[0].interface[1].static_ip_address = update_data[name_]['static_ip_address']
+            if pytest.config.getoption('--port-sequencing'):
+                vnfd_intf_list = vnfd.vdu[0].interface
+                # for ping vnfd, remove positional values from all interfaces
+                # for pong vnfd, modify the positional values as per fixture port_sequencing_intf_positions
+                if 'ping' in vnfd.name:
+                    tmp_intf_list = []
+                    for i in range(len(vnfd_intf_list)):
+                        tmp_intf_dict = vnfd_intf_list[-1].as_dict()
+                        del tmp_intf_dict['position']
+                        vnfd_intf_list.pop()
+                        tmp_intf_list.append(tmp_intf_dict)
+                    for intf_dict_without_positional_values in tmp_intf_list:
+                        new_intf = vnfd.vdu[0].interface.add()
+                        new_intf.from_dict(intf_dict_without_positional_values)
+
+                if 'pong' in vnfd.name:
+                    for intf in vnfd_intf_list:
+                        if 'position' in intf:
+                            intf.position = port_sequencing_intf_positions_tmp.pop()
+
+        # Update/save the VNFDs
+        for vnfd in vnfds_dict.values():
+            vnfd_proxy.replace_config(xpath.format(quoted_key(vnfd.id)), vnfd)
+
+        # Match whether data is updated
+        vnfds_dict = get_vnfd()
+        assert vnfds_dict
+        for name_, vnfd in vnfds_dict.items():
+            if pytest.config.getoption('--update-vnfd-instantiate'):
+                assert vnfd.vdu[0].interface[1].static_ip_address == update_data[name_]['static_ip_address']
+            if pytest.config.getoption('--port-sequencing'):
+                if 'ping' in vnfd.name:
+                    for intf in vnfd.vdu[0].interface:
+                        assert 'position' not in intf.as_dict()
+                if 'pong' in vnfd.name:
+                    tmp_positional_values_list = []
+                    for intf in vnfd.vdu[0].interface:
+                        if 'position' in intf.as_dict():
+                            tmp_positional_values_list.append(intf.position)
+                    assert set(tmp_positional_values_list) == set(port_sequencing_intf_positions)
 
     def test_instantiate_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account_name):
 
@@ -329,47 +509,72 @@
                                                                            config_param.value,
                                                                            running_config.input_parameter))
 
-        catalog = nsd_proxy.get_config('/nsd-catalog')
+        catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
         nsd = catalog.nsd[0]
 
         input_parameters = []
-        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
-        descr_value = "New NSD Description"
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd/nsd:vendor"
+        descr_value = "New Vendor"
         in_param_id = str(uuid.uuid4())
 
-        input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+        input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
                                                                 xpath=descr_xpath,
                                                                 value=descr_value)
 
         input_parameters.append(input_param_1)
 
-        nsr = create_nsr(nsd, input_parameters, cloud_account_name)
+        nsr = rift.auto.descriptor.create_nsr(cloud_account_name, nsd.name, nsd, input_param_list=input_parameters)
 
         logger.info("Instantiating the Network Service")
-        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+        rwnsr_proxy.create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
 
-        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+        nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]'.format(quoted_key(nsr.id)))
         assert nsr_opdata is not None
 
         # Verify the input parameter configuration
-        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        running_config = rwnsr_proxy.get_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id=%s]" % quoted_key(nsr.id))
         for input_param in input_parameters:
             verify_input_parameters(running_config, input_param)
 
     def test_wait_for_nsr_started(self, rwnsr_proxy):
-        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        """Verify NSR instances enter 'running' operational-status
+        """
+        nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
         nsrs = nsr_opdata.nsr
 
         for nsr in nsrs:
-            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
-            rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=240)
+            xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(quoted_key(nsr.ns_instance_config_ref))
+            rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=400)
+
+    def test_wait_for_nsr_configured(self, rwnsr_proxy):
+        """Verify NSR instances enter 'configured' config-status
+        """
+        nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(quoted_key(nsr.ns_instance_config_ref))
+            rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=400)
 
 
 @pytest.mark.teardown('nsr')
 @pytest.mark.depends('launchpad')
 @pytest.mark.incremental
 class TestNsrTeardown(object):
-    def test_terminate_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger, cloud_type):
+
+    def test_delete_embedded_images(self, random_image_name, cloud_accounts, cal):
+        """Deletes images embedded in VNF from VIM. It only deletes additional images, not
+        the Fedora ping pong images"""
+        for account in cloud_accounts:
+            rc, rsp = cal.get_image_list(RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict(account.as_dict()))
+            assert rc == RwTypes.RwStatus.SUCCESS
+            if rsp is not None:
+                for image in rsp.imageinfo_list:
+                    if random_image_name in image.name:
+                        cal.delete_image(RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict(account.as_dict()), image.id)
+
+    def test_terminate_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger, cloud_type,
+                           rwvlr_proxy, vim_clients, cloud_account_name):
         """
         Terminate the instance and check if the record is deleted.
 
@@ -377,32 +582,66 @@
         1. NSR record is deleted from instance-config.
 
         """
-        logger.debug("Terminating NSR")
+        # Collects the Cloud resources like ports, networks, VMs used by the current NS
+        ns_cloud_resources = get_ns_cloud_resources(rwvnfr_proxy, rwvlr_proxy)
+        logger.info('Cloud resources used by NS: {}'.format(ns_cloud_resources))
 
+        logger.debug("Terminating NSR")
         wait_after_kill = True
         if cloud_type == "mock":
             wait_after_kill = False
 
-        terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger, wait_after_kill=wait_after_kill)
+        rift.auto.descriptor.terminate_nsr(rwvnfr_proxy, rwnsr_proxy,
+                                           rwvlr_proxy, logger,
+                                           wait_after_kill=wait_after_kill)
+        # Collect all the ports, networks VMs from openstack and
+        # check if previously collected resources (i.e ns_cloud_resources) are still present in this collection
+        start_time = time.time()
+        while time.time()-start_time < 240:
+            try:
+                vim_client = vim_clients[cloud_account_name]
+                vim_resources = dict()
+                vim_resources['networks'] = vim_client.neutron_network_list()
+                vim_resources['vms'] = vim_client.nova_server_list()
+                vim_resources['ports'] = vim_client.neutron_port_list()
 
-    def test_delete_records(self, nsd_proxy, vnfd_proxy):
+                for resource_type in ns_cloud_resources.keys():
+                    logger.debug("Verifying all %s resources have been removed from vim", resource_type)
+                    vim_resource_ids = [
+                        vim_resource['id'] for vim_resource in vim_resources[resource_type]
+                        if 'shared' not in vim_resource.keys()
+                        or not vim_resource['shared']
+                    ]
+                    for ns_resource_id in ns_cloud_resources[resource_type]:
+                        logger.debug('Verifying %s resource %s removed', resource_type, ns_resource_id)
+                        assert ns_resource_id not in vim_resource_ids
+                return
+            except AssertionError:
+                time.sleep(10)
+        raise AssertionError("Resources not cleared from openstack")
+
+    def test_delete_records(self, nsd_proxy, vnfd_proxy, iteration):
         """Delete the NSD & VNFD records
 
         Asserts:
             The records are deleted.
         """
-        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        # We are instantiating the NS twice in port-sequencing test. Seconds NS instantiation will be using already uploaded
+        # descriptors with updated interface positional values.
+        if iteration==0 and pytest.config.getoption("--port-sequencing"):
+            pytest.skip()
+        nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
         for nsd in nsds.nsd:
-            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(nsd.id))
             nsd_proxy.delete_config(xpath)
 
-        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
         assert nsds is None or len(nsds.nsd) == 0
 
-        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
         for vnfd_record in vnfds.vnfd:
-            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
             vnfd_proxy.delete_config(xpath)
 
-        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
         assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/test_failover.py b/rwlaunchpad/ra/pytest/test_failover.py
index 60ba82a..d24fbff 100755
--- a/rwlaunchpad/ra/pytest/test_failover.py
+++ b/rwlaunchpad/ra/pytest/test_failover.py
@@ -20,15 +20,17 @@
 @brief System test of stopping launchpad on master and
 validating configuration on standby
 """
+import argparse
+import gi
 import os
+import subprocess
 import sys
 import time
-import argparse
-import subprocess
 
-import gi
-from gi.repository import RwVnfdYang
+from gi.repository import RwProjectVnfdYang
 from gi.repository import RwVnfrYang
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
 
 import rift.auto.proxy
 from rift.auto.session import NetconfSession
@@ -46,10 +48,10 @@
         Tuple: VNFD and its corresponding VNFR entry
     """
     def get_vnfd(vnfd_id):
-        xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_id)
-        return proxy(RwVnfdYang).get(xpath)
+        xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_id))
+        return proxy(RwProjectVnfdYang).get(xpath)
 
-    vnfr = "/vnfr-catalog/vnfr"
+    vnfr = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr"
     print ("START")
     vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)
     print ("STOP")
diff --git a/rwlaunchpad/ra/pytest/test_launchpad.py b/rwlaunchpad/ra/pytest/test_launchpad.py
index a6f5ae7..2a275bc 100644
--- a/rwlaunchpad/ra/pytest/test_launchpad.py
+++ b/rwlaunchpad/ra/pytest/test_launchpad.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 """
-# 
+#
 #   Copyright 2016 RIFT.IO Inc
 #
 #   Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,12 +22,17 @@
 @brief System test of basic launchpad functionality
 """
 
+import gi
 import pytest
 
-import gi
 gi.require_version('RwsdnalYang', '1.0')
 
 from gi.repository import RwsdnalYang
+from gi.repository import RwSdnYang
+from gi.repository import RwRoAccountYang
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
 
 @pytest.mark.setup('sdn')
 @pytest.mark.feature('sdn')
@@ -40,11 +45,36 @@
             SDN name and accout type.
         '''
         proxy = mgmt_session.proxy(RwsdnalYang)
-        sdn_account = RwsdnalYang.SDNAccount(
+        sdn_account = RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList(
                 name=sdn_account_name,
                 account_type=sdn_account_type)
-        xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
-        proxy.create_config(xpath, sdn_account)
+        xpath = "/rw-project:project[rw-project:name='default']/sdn-accounts/sdn-account-list[name=%s]" % quoted_key(sdn_account_name)
+        proxy.replace_config(xpath, sdn_account)
+        sdn_account = proxy.get(xpath)
+
+    def test_create_openstack_sdn_account(self, mgmt_session, openstack_sdn_account_name, cloud_account):
+        '''Configure sdn account
+
+        Asserts:
+            SDN name and account type.
+        '''
+        proxy = mgmt_session.proxy(RwSdnYang)
+        sdn_account = RwSdnYang.YangData_RwProject_Project_Sdn_Account.from_dict({
+                        'name':  openstack_sdn_account_name,
+                        'account_type': 'openstack',
+                        'openstack': {
+                            'admin': cloud_account.openstack.admin,
+                            'key': cloud_account.openstack.key,
+                            'secret': cloud_account.openstack.secret,
+                            'auth_url': cloud_account.openstack.auth_url,
+                            'tenant': cloud_account.openstack.tenant,
+                            'project_domain': cloud_account.openstack.project_domain,
+                            'user_domain': cloud_account.openstack.user_domain,
+                            'region': cloud_account.openstack.region,
+                                    }})
+
+        xpath = "/rw-project:project[rw-project:name='default']/sdn/account[name={}]".format(quoted_key(openstack_sdn_account_name))
+        proxy.replace_config(xpath, sdn_account)
         sdn_account = proxy.get(xpath)
 
 @pytest.mark.depends('sdn')
@@ -58,10 +88,23 @@
             sdn_account.account_type is what was configured
         '''
         proxy = mgmt_session.proxy(RwsdnalYang)
-        xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
+        xpath = "/rw-project:project[rw-project:name='default']/sdn-accounts/sdn-account-list[name=%s]" % quoted_key(sdn_account_name)
         sdn_account = proxy.get_config(xpath)
         assert sdn_account.account_type == sdn_account_type
 
+    def test_openstack_sdn_account_connection_status(self, mgmt_session, openstack_sdn_account_name):
+        '''Verify connection status on openstack sdn account
+
+        Asserts:
+            openstack sdn account is successfully connected
+        '''
+        proxy = mgmt_session.proxy(RwSdnYang)
+        proxy.wait_for(
+            '/rw-project:project[rw-project:name="default"]/sdn/account[name={}]/connection-status/status'.format(quoted_key(openstack_sdn_account_name)),
+            'success',
+            timeout=30,
+            fail_on=['failure'])
+
 @pytest.mark.teardown('sdn')
 @pytest.mark.feature('sdn')
 @pytest.mark.incremental
@@ -69,15 +112,22 @@
     def test_delete_odl_sdn_account(self, mgmt_session, sdn_account_name):
         '''Unconfigure sdn account'''
         proxy = mgmt_session.proxy(RwsdnalYang)
-        xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
+        xpath = "/rw-project:project[rw-project:name='default']/sdn-accounts/sdn-account-list[name=%s]" % quoted_key(sdn_account_name)
+        proxy.delete_config(xpath)
+
+    def test_delete_openstack_sdn_account(self, mgmt_session, openstack_sdn_account_name):
+        '''Unconfigure sdn account'''
+        proxy = mgmt_session.proxy(RwSdnYang)
+        xpath = '/rw-project:project[rw-project:name="default"]/sdn/account[name={}]'.format(quoted_key(openstack_sdn_account_name))
         proxy.delete_config(xpath)
 
 
 @pytest.mark.setup('launchpad')
+@pytest.mark.depends('sdn')
 @pytest.mark.usefixtures('cloud_account')
 @pytest.mark.incremental
 class TestLaunchpadSetup:
-    def test_create_cloud_accounts(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+    def test_create_cloud_accounts(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts, l2_port_chaining, openstack_sdn_account_name):
         '''Configure cloud accounts
 
         Asserts:
@@ -85,16 +135,14 @@
         '''
         proxy = mgmt_session.proxy(cloud_module)
         for cloud_account in cloud_accounts:
-            xpath = '{}[name="{}"]'.format(cloud_xpath, cloud_account.name)
+            if l2_port_chaining:
+                cloud_account.sdn_account = openstack_sdn_account_name
+            xpath = '{}[name={}]'.format(cloud_xpath, quoted_key(cloud_account.name))
             proxy.replace_config(xpath, cloud_account)
             response =  proxy.get(xpath)
             assert response.name == cloud_account.name
             assert response.account_type == cloud_account.account_type
 
-@pytest.mark.depends('launchpad')
-@pytest.mark.usefixtures('cloud_account')
-@pytest.mark.incremental
-class TestLaunchpad:
     def test_account_connection_status(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
         '''Verify connection status on each cloud account
 
@@ -104,11 +152,26 @@
         proxy = mgmt_session.proxy(cloud_module)
         for cloud_account in cloud_accounts:
             proxy.wait_for(
-                '{}[name="{}"]/connection-status/status'.format(cloud_xpath, cloud_account.name),
+                '{}[name={}]/connection-status/status'.format(cloud_xpath, quoted_key(cloud_account.name)),
                 'success',
                 timeout=30,
                 fail_on=['failure'])
 
+    @pytest.mark.feature('openmano')
+    def test_create_ro_accounts(self, mgmt_session, ro_accounts):
+        for name, ro_account in ro_accounts.items():
+            mgmt_session.proxy(RwRoAccountYang).create_config('/rw-project:project[rw-project:name="default"]/ro-account/account', ro_account)
+
+    @pytest.mark.feature('openmano')
+    def test_ro_account_connection_status(self, mgmt_session, ro_accounts):
+        for name, ro_account in ro_accounts.items():
+            mgmt_session.proxy(RwRoAccountYang).wait_for((
+                '/rw-project:project[rw-project:name="default"]'
+                '/ro-account-state/account[name={account_name}]/connection-status/status'
+                ).format(account_name=quoted_key(ro_account.name)),
+                'success',
+                timeout=30,
+                fail_on=['failure'])
 
 @pytest.mark.teardown('launchpad')
 @pytest.mark.usefixtures('cloud_account')
@@ -118,5 +181,11 @@
         '''Unconfigure cloud_account'''
         proxy = mgmt_session.proxy(cloud_module)
         for cloud_account in cloud_accounts:
-            xpath = "{}[name='{}']".format(cloud_xpath, cloud_account.name)
+            xpath = "{}[name={}]".format(cloud_xpath, quoted_key(cloud_account.name))
             proxy.delete_config(xpath)
+
+    @pytest.mark.feature('openmano')
+    def test_delete_ro_accounts(self, mgmt_session, ro_accounts):
+        for name, ro_account in ro_accounts.items():
+            xpath = "/rw-project:project[rw-project:name='default']/ro-account/account[name={}]"
+            mgmt_session.proxy(RwRoAccountYang).delete_config(xpath.format(quoted_key(name)))
diff --git a/rwlaunchpad/ra/pytest/test_start_standby.py b/rwlaunchpad/ra/pytest/test_start_standby.py
index cf0e5d9..80e4e7f 100755
--- a/rwlaunchpad/ra/pytest/test_start_standby.py
+++ b/rwlaunchpad/ra/pytest/test_start_standby.py
@@ -51,7 +51,7 @@
 
     cmd_template = ("ssh_root {remote_ip} -q -o BatchMode=yes -o "
     " UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -- "
-    " \"rm -rf /tmp/corosync; cd {rift_install}; {rift_root}/rift-shell -e -- {rift_install}/usr/bin/rwmain -m /tmp/manifest.xml\"").format(
+    " \"rm -rf /tmp/corosync; cd {rift_install}; {rift_root}/rift-shell -- {rift_install}/usr/bin/rwmain -m /tmp/manifest.xml\"").format(
       remote_ip=remote_ip,
       rift_root=rift_root,
       rift_install=rift_install)