update from RIFT as of 696b75d2fe9fb046261b08c616f1bcf6c0b54a9b second try
Signed-off-by: Jeremy Mordkoff <Jeremy.Mordkoff@riftio.com>
diff --git a/rwlaunchpad/test/CMakeLists.txt b/rwlaunchpad/test/CMakeLists.txt
index 1c18e26..c84f056 100644
--- a/rwlaunchpad/test/CMakeLists.txt
+++ b/rwlaunchpad/test/CMakeLists.txt
@@ -23,7 +23,7 @@
PROGRAMS
launchpad.py
DESTINATION demos
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
install(
@@ -31,7 +31,7 @@
pytest/lp_test.py
DESTINATION
usr/rift/systemtest/pytest/launchpad
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
install(
@@ -39,14 +39,14 @@
launchpad_recovery
DESTINATION
usr/rift/systemtest/launchpad
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
install(
PROGRAMS
launchpad
DESTINATION usr/bin
- COMPONENT rwcal-1.0
+ COMPONENT ${INSTALL_COMPONENT}
)
rift_py3test(utest_rwmonitor
diff --git a/rwlaunchpad/test/launchpad.py b/rwlaunchpad/test/launchpad.py
index 98680ba..89c00ab 100755
--- a/rwlaunchpad/test/launchpad.py
+++ b/rwlaunchpad/test/launchpad.py
@@ -40,8 +40,23 @@
from rift.vcs.ext import ClassProperty
+
logger = logging.getLogger(__name__)
+IDP_PORT_NUMBER = "8009"
+
+def get_launchpad_address():
+ # Search for externally accessible IP address with netifaces
+ gateways = netifaces.gateways()
+ # Check for default route facing interface and then get its ip address
+ if 'default' in gateways:
+ interface = gateways['default'][netifaces.AF_INET][1]
+ launchpad_ip_address = netifaces.ifaddresses(interface)[netifaces.AF_INET][0]['addr']
+ else:
+ # no default gateway. Revert to 127.0.0.1
+ launchpad_ip_address = "127.0.0.1"
+
+ return launchpad_ip_address
class NsmTasklet(rift.vcs.core.Tasklet):
"""
@@ -52,6 +67,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a NsmTasklet object.
@@ -64,6 +80,7 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwnsmtasklet')
@@ -79,6 +96,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a VnsTasklet object.
@@ -91,6 +109,7 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnstasklet')
@@ -106,6 +125,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a VnfmTasklet object.
@@ -118,6 +138,7 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnfmtasklet')
@@ -133,6 +154,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a ResMgrTasklet object.
@@ -145,6 +167,7 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwresmgrtasklet')
@@ -160,6 +183,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a Image Manager Tasklet object.
@@ -173,6 +197,7 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwimagemgrtasklet')
@@ -188,6 +213,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a MonitorTasklet object.
@@ -201,6 +227,7 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwmonitor')
@@ -211,6 +238,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ANY_VM.value,
):
super(RedisServer, self).__init__(
name=name,
@@ -218,6 +246,7 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
@property
@@ -235,6 +264,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a MonitoringParameterTasklet object.
@@ -248,6 +278,7 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwmonparam')
@@ -264,6 +295,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a MonitoringParameterTasklet object.
@@ -277,6 +309,7 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwautoscaler')
@@ -291,6 +324,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a StagingMangerTasklet object.
@@ -304,32 +338,20 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwstagingmgr')
plugin_name = ClassProperty('rwstagingmgr')
-def get_ui_ssl_args():
- """Returns the SSL parameter string for launchpad UI processes"""
-
- try:
- use_ssl, certfile_path, keyfile_path = certs.get_bootstrap_cert_and_key()
- except certs.BootstrapSslMissingException:
- logger.error('No bootstrap certificates found. Disabling UI SSL')
- use_ssl = False
-
- # If we're not using SSL, no SSL arguments are necessary
- if not use_ssl:
- return ""
-
- return "--enable-https --keyfile-path=%s --certfile-path=%s" % (keyfile_path, certfile_path)
-
class UIServer(rift.vcs.NativeProcess):
def __init__(self, name="RW.MC.UI",
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
+ external_address=None,
):
super(UIServer, self).__init__(
name=name,
@@ -337,11 +359,40 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
+ self._external_address = external_address
@property
def args(self):
- return get_ui_ssl_args()
+ return self._get_ui_args()
+
+ def _get_ui_args(self):
+ """Returns the SSL parameter string for launchpad UI processes"""
+
+ try:
+ use_ssl, certfile_path, keyfile_path = certs.get_bootstrap_cert_and_key()
+ except certs.BootstrapSslMissingException:
+ logger.error('No bootstrap certificates found. Disabling UI SSL')
+ use_ssl = False
+
+ # If we're not using SSL, no SSL arguments are necessary
+ if not use_ssl:
+ return ""
+
+ # If an external address is set, take that value for launchpad IP
+ # address, else use the internal IP address used for default route
+ launchpad_ip_address = self._external_address
+ if not launchpad_ip_address:
+ launchpad_ip_address = get_launchpad_address()
+
+ return "--enable-https" +\
+ " --keyfile-path={}".format(keyfile_path) +\
+ " --certfile-path={}".format(certfile_path) +\
+ " --launchpad-address={}".format(launchpad_ip_address) +\
+ " --idp-port-number={}".format(IDP_PORT_NUMBER) +\
+ " --callback-address={}".format(launchpad_ip_address)
+
class ConfigManagerTasklet(rift.vcs.core.Tasklet):
"""
@@ -352,6 +403,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a ConfigManagerTasklet object.
@@ -364,11 +416,42 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwconmantasklet')
plugin_name = ClassProperty('rwconmantasklet')
+
+class ProjectMgrManoTasklet(rift.vcs.core.Tasklet):
+ """
+ This class represents the Project Manager Mano tasklet.
+ """
+
+ def __init__(self, name='Project-Manager-Mano', uid=None,
+ config_ready=True,
+ recovery_action=core.RecoveryType.FAILCRITICAL.value,
+ data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
+ ):
+ """
+ Creates a ProjectMgrManoTasklet object.
+
+ Arguments:
+ name - the name of the tasklet
+ uid - a unique identifier
+ """
+ super(ProjectMgrManoTasklet, self).__init__(name=name, uid=uid,
+ config_ready=config_ready,
+ recovery_action=recovery_action,
+ data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
+ )
+
+ plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwprojectmano')
+ plugin_name = ClassProperty('rwprojectmano')
+
+
class PackageManagerTasklet(rift.vcs.core.Tasklet):
"""
This class represents a Resource Manager tasklet.
@@ -378,6 +461,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a PackageManager object.
@@ -390,6 +474,7 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwpkgmgr')
@@ -400,6 +485,7 @@
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
super(GlanceServer, self).__init__(
name=name,
@@ -407,6 +493,7 @@
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
@property
@@ -415,71 +502,88 @@
class Demo(rift.vcs.demo.Demo):
- def __init__(self, no_ui=False, ha_mode=None, mgmt_ip_list=[], test_name=None):
- procs = [
- ConfigManagerTasklet(),
- GlanceServer(),
- rift.vcs.DtsRouterTasklet(),
- rift.vcs.MsgBrokerTasklet(),
- rift.vcs.RestPortForwardTasklet(),
- rift.vcs.RestconfTasklet(),
- rift.vcs.RiftCli(),
- rift.vcs.uAgentTasklet(),
- rift.vcs.Launchpad(),
- ]
-
- standby_procs = [
- RedisServer(),
- rift.vcs.DtsRouterTasklet(),
- rift.vcs.MsgBrokerTasklet(),
- ]
+ def __init__(self, no_ui=False,
+ data_store=None,
+ mgmt_ip_list=[],
+ test_name=None,
+ start_auth_svc=None,
+ start_pam_svc=None,
+ external_address=None):
datastore = core.DataStore.BDB.value
- if ha_mode:
- procs.append(RedisServer())
+ if data_store == "Redis":
datastore = core.DataStore.REDIS.value
+ elif data_store == "None":
+ datastore = core.DataStore.NOSTORE.value
+
+ restart_db_active = {"recovery_action" : core.RecoveryType.RESTART.value, \
+ "data_storetype" : datastore, \
+ "ha_startup_mode" : core.HaStartup.ONLY_ACTIVE.value}
+
+ failcrit_db_active = {"recovery_action" : core.RecoveryType.FAILCRITICAL.value, \
+ "data_storetype" : datastore, \
+ "ha_startup_mode" : core.HaStartup.ONLY_ACTIVE.value}
+
+ failcrit_db_any = {"recovery_action" : core.RecoveryType.FAILCRITICAL.value, \
+ "data_storetype" : datastore, \
+ "ha_startup_mode" : core.HaStartup.ANY_VM.value}
+
+ procs = [
+ ConfigManagerTasklet(**failcrit_db_active),
+ GlanceServer(**failcrit_db_active),
+ rift.vcs.DtsRouterTasklet(**failcrit_db_any),
+ rift.vcs.MsgBrokerTasklet(**failcrit_db_any),
+ rift.vcs.RestconfTasklet(**failcrit_db_active),
+ rift.vcs.RiftCli(**failcrit_db_active, as_console=True),
+ rift.vcs.uAgentTasklet(**failcrit_db_any),
+ rift.vcs.Launchpad(**failcrit_db_active),
+ rift.vcs.IdentityManagerTasklet(**failcrit_db_active),
+ rift.vcs.ProjectManagerTasklet(**failcrit_db_active),
+ rift.vcs.HAManager(**failcrit_db_any),
+ rift.vcs.OpenIDCProviderTasklet(**failcrit_db_active),
+ rift.vcs.AuthExtUserTasklet(**failcrit_db_active),
+ rift.vcs.OTTAuthTasklet(**failcrit_db_active),
+ NsmTasklet(**failcrit_db_active),
+ VnfmTasklet(**failcrit_db_active),
+ VnsTasklet(**failcrit_db_active),
+ ResMgrTasklet(**failcrit_db_active),
+ ImageMgrTasklet(**failcrit_db_active),
+ AutoscalerTasklet(**failcrit_db_active),
+ StagingManagerTasklet(**failcrit_db_active),
+ PackageManagerTasklet(**failcrit_db_active),
+ MonitoringParameterTasklet(**failcrit_db_active),
+ ProjectMgrManoTasklet(**failcrit_db_active)
+ ]
+
+ if datastore == core.DataStore.REDIS.value:
+ procs.append(RedisServer(**failcrit_db_any))
if not no_ui:
- procs.append(UIServer())
+ procs.append(UIServer(external_address=external_address))
- restart_procs = [
- VnfmTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- VnsTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- # MonitorTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- MonitoringParameterTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- NsmTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- ResMgrTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- ImageMgrTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- AutoscalerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- PackageManagerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- StagingManagerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- ]
+ if start_auth_svc:
+ procs.append(rift.vcs.WebAuthSvcTasklet(**failcrit_db_active))
+
+ if start_pam_svc:
+ procs.append(rift.vcs.PAMAuthTasklet())
+
+ restart_procs = []
if not mgmt_ip_list or len(mgmt_ip_list) == 0:
- mgmt_ip_list.append("127.0.0.1")
+ mgmt_ip_list.append(get_launchpad_address())
colony = rift.vcs.core.Colony(name='top', uid=1)
-
- lead_lp_vm = rift.vcs.VirtualMachine(
- name='vm-launchpad-1',
- ip=mgmt_ip_list[0],
- procs=procs,
- restart_procs=restart_procs,
- )
- lead_lp_vm.leader = True
- colony.append(lead_lp_vm)
-
- if ha_mode:
- stby_lp_vm = rift.vcs.VirtualMachine(
- name='launchpad-vm-2',
- ip=mgmt_ip_list[1],
- procs=standby_procs,
- start=False,
- )
- # WA to Agent mode_active flag reset
- stby_lp_vm.add_tasklet(rift.vcs.uAgentTasklet(), mode_active=False)
- colony.append(stby_lp_vm)
-
+ leader = 0
+ for mgmt_ip in mgmt_ip_list:
+ vm = rift.vcs.VirtualMachine(name='mgmt-vm-lp',
+ ip=mgmt_ip,
+ procs=procs,
+ restart_procs=restart_procs,start=False,)
+ if (leader == 0):
+ vm.leader = True
+ leader = 1
+ colony.append(vm)
+
sysinfo = rift.vcs.SystemInfo(
mode='ethsim',
zookeeper=rift.vcs.manifest.RaZookeeper(master_ip=mgmt_ip_list[0]),
@@ -518,15 +622,38 @@
# Create a parser which includes all generic demo arguments
parser = rift.vcs.demo.DemoArgParser()
parser.add_argument("--no-ui", action='store_true')
+ parser.add_argument("--start-auth-svc",
+ action='store_true',
help="Start the Web Based Authentication service simulator.")
+ parser.add_argument("--start-pam-svc",
+ action='store_true',
+ help="Start the PAM Authentication service.")
+ parser.add_argument("--external-address",
+ type=str,
help="External IP address or hostname by which the host can "+
"be reached.")
+ if rift.vcs.mgmt.default_agent_mode() == 'CONFD':
+ parser.add_argument("--use-osm-model",
+ action='store_true',
+ help="Load only OSM specific models and hide the Rift Specific Augments")
+
args = parser.parse_args(argv)
# Disable loading any kernel modules for the launchpad VM
# since it doesn't need it and it will fail within containers
os.environ["NO_KERNEL_MODS"] = "1"
+ # Get external_address from env if args not set
+ if args.external_address is None:
+ args.external_address = os.getenv("RIFT_EXTERNAL_ADDRESS")
+
+ os.environ["RIFT_EXTERNAL_ADDRESS"] = \
+ args.external_address if args.external_address else get_launchpad_address()
+
cleanup_dir_name = None
- if os.environ["INSTALLDIR"] in ["/", "/home/rift", "/home/rift/.install",
- "/usr/rift/build/fc20_debug/install/usr/rift", "/usr/rift"]:
+ if os.environ["INSTALLDIR"] in ["/usr/rift",
+ "/usr/rift/build/ub16_debug/install/usr/rift",
+ "/usr/rift/build/fc20_debug/install/usr/rift"]:
cleanup_dir_name = os.environ["INSTALLDIR"] + "/var/rift/"
if args.test_name and not cleanup_dir_name:
@@ -548,8 +675,8 @@
for f in os.listdir(cleanup_dir_name):
if f.endswith(".aof") or f.endswith(".rdb"):
os.remove(os.path.join(cleanup_dir_name, f))
-
- # Remove the persistant DTS recovery files
+
+ # Remove the persistent DTS recovery files
for f in os.listdir(cleanup_dir_name):
if f.endswith(".db"):
os.remove(os.path.join(cleanup_dir_name, f))
@@ -561,35 +688,46 @@
except Exception as e:
print ("Error while cleanup: {}".format(str(e)))
- ha_mode = args.ha_mode
+ datastore = args.datastore
mgmt_ip_list = [] if not args.mgmt_ip_list else args.mgmt_ip_list
#load demo info and create Demo object
- demo = Demo(args.no_ui, ha_mode, mgmt_ip_list, args.test_name)
+ demo = Demo(args.no_ui,
+ datastore,
+ mgmt_ip_list,
+ args.test_name,
+ args.start_auth_svc,
+ args.start_pam_svc,
+ args.external_address)
+
+ if 'use_osm_model' in args and args.use_osm_model:
+ northbound_listing = ["platform_schema_listing.txt",
+ "platform_mgmt_schema_listing.txt",
+ "cli_launchpad_schema_listing.txt"]
+ args.use_xml_mode = True
+
+ else:
+ northbound_listing = ["platform_schema_listing.txt",
+ "platform_mgmt_schema_listing.txt",
+ "cli_launchpad_schema_listing.txt",
+ "cli_launchpad_rift_specific_schema_listing.txt"]
# Create the prepared system from the demo
- system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args,
- northbound_listing="cli_launchpad_schema_listing.txt",
- netconf_trace_override=True)
+ system = rift.vcs.demo.prepared_system_from_demo_and_args(
+ demo, args,
+ northbound_listing=northbound_listing,
+ netconf_trace_override=True)
- # Search for externally accessible IP address with netifaces
- gateways = netifaces.gateways()
- # Check for default route facing interface and then get its ip address
- if 'default' in gateways:
- interface = gateways['default'][netifaces.AF_INET][1]
- confd_ip = netifaces.ifaddresses(interface)[netifaces.AF_INET][0]['addr']
- else:
- # no default gateway. Revert to 127.0.0.1
- confd_ip = "127.0.0.1"
+ confd_ip = get_launchpad_address()
# TODO: This need to be changed when launchpad starts running on multiple VMs
rift.vcs.logger.configure_sink(config_file=None, confd_ip=confd_ip)
# Start the prepared system
system.start()
-
if __name__ == "__main__":
resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY) )
+ os.system('/usr/rift/bin/UpdateHostsFile')
try:
main()
except rift.vcs.demo.ReservationError:
diff --git a/rwlaunchpad/test/launchpad_recovery b/rwlaunchpad/test/launchpad_recovery
index eea5d4a..362dacc 100755
--- a/rwlaunchpad/test/launchpad_recovery
+++ b/rwlaunchpad/test/launchpad_recovery
@@ -79,7 +79,7 @@
@classmethod
def configure_schema(cls):
schema = RwYang.Model.load_and_merge_schema(rwvcs.get_schema(), 'librwcal_yang_gen.so', 'Rwcal')
- cls.model = RwYang.Model.create_libncx()
+ cls.model = RwYang.Model.create_libyang()
cls.model.load_schema_ypbc(schema)
xml = cls.manifest.to_xml_v2(cls.model, 1)
xml = re.sub('rw-manifest:', '', xml)
@@ -96,7 +96,7 @@
manifest = rwmanifest.Manifest()
manifest.bootstrap_phase = rwmanifest.BootstrapPhase.from_dict({
"rwmgmt": {
- "northbound_listing": [ "cli_launchpad_schema_listing.txt" ]
+ "northbound_listing": [ "platform_schema_listing.txt", "platform_mgmt_schema_listing.txt", "cli_launchpad_schema_listing.txt" ]
},
"rwtasklet": {
"plugin_name": "rwinit-c"
@@ -210,15 +210,7 @@
"recovery_action": "RESTART",
"config_ready": True
}
- },
-# {
-# "name": "Start the RW.CLI",
-# "start": {
-# "component_name": "RW.CLI",
-# "recovery_action": "RESTART",
-# "config_ready": True
-# }
-# },
+ },
{
"name": "Start the RW.Proc_1.Restconf",
"start": {
@@ -227,14 +219,6 @@
"config_ready": True
}
},
-# {
-# "name": "Start the RW.Proc_2.RestPortForward",
-# "start": {
-# "component_name": "RW.Proc_2.RestPortForward",
-# "recovery_action": "RESTART",
-# "config_ready": True
-# }
-# },
{
"name": "Start the RW.Proc_3.CalProxy",
"start": {
@@ -364,26 +348,6 @@
"plugin_name": "restconf"
}
},
-# {
-# "component_name": "RW.Proc_2.RestPortForward",
-# "component_type": "RWPROC",
-# "rwproc": {
-# "tasklet": [{
-# "name": "Start RW.RestPortForward for RW.Proc_2.RestPortForward",
-# "component_name": "RW.RestPortForward",
-# "recovery_action": "RESTART",
-# "config_ready": True
-# }]
-# }
-# },
-# {
-# "component_name": "RW.RestPortForward",
-# "component_type": "RWTASKLET",
-# "rwtasklet": {
-# "plugin_directory": "./usr/lib/rift/plugins/restportforward",
-# "plugin_name": "restportforward"
-# }
-# },
{
"component_name": "RW.Proc_3.CalProxy",
"component_type": "RWPROC",
diff --git a/rwlaunchpad/test/mano_error_ut.py b/rwlaunchpad/test/mano_error_ut.py
index e593cee..09b028d 100755
--- a/rwlaunchpad/test/mano_error_ut.py
+++ b/rwlaunchpad/test/mano_error_ut.py
@@ -107,7 +107,7 @@
)
resource_info.update(self._vdu_info)
- response = RwResourceMgrYang.VDUEventData.from_dict(dict(
+ response = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData.from_dict(dict(
event_id=self._event_id,
request_info=self._request_info.as_dict(),
resource_info=resource_info,
@@ -164,7 +164,7 @@
)
resource_info.update(self._link_info)
- response = RwResourceMgrYang.VirtualLinkEventData.from_dict(dict(
+ response = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData.from_dict(dict(
event_id=self._event_id,
request_info=self._request_info.as_dict(),
resource_info=resource_info,
@@ -174,8 +174,8 @@
class ResourceMgrMock(object):
- VDU_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data"
- VLINK_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
+ VDU_REQUEST_XPATH = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data"
+ VLINK_REQUEST_XPATH = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
def __init__(self, dts, log, loop):
self._log = log
@@ -247,7 +247,7 @@
response_info = None
response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
- schema = RwResourceMgrYang.VirtualLinkEventData().schema()
+ schema = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData().schema()
pathentry = schema.keyspec_to_entry(ks_path)
if action == rwdts.QueryAction.CREATE:
@@ -279,16 +279,14 @@
return
@asyncio.coroutine
- def monitor_vdu_state(response_xpath, pathentry):
+ def monitor_vdu_state(response_xpath, event_id):
self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath)
loop_cnt = 120
while loop_cnt > 0:
self._log.debug("VDU state monitoring: Sleeping for 1 second ")
yield from asyncio.sleep(1, loop = self._loop)
try:
- response_info = self._read_virtual_compute(
- pathentry.key00.event_id
- )
+ response_info = self._read_virtual_compute(event_id)
except Exception as e:
self._log.error(
"VDU state monitoring: Received exception %s "
@@ -313,7 +311,7 @@
### End of while loop. This is only possible if VDU did not reach active state
self._log.info("VDU state monitoring: VDU at xpath :%s did not reached active state in 120 seconds. Aborting monitoring",
response_xpath)
- response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+ response_info = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_ResourceInfo()
response_info.resource_state = 'failed'
yield from self._dts.query_update(response_xpath,
rwdts.XactFlag.ADVISE,
@@ -326,7 +324,7 @@
response_info = None
response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
- schema = RwResourceMgrYang.VDUEventData().schema()
+ schema = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData().schema()
pathentry = schema.keyspec_to_entry(ks_path)
if action == rwdts.QueryAction.CREATE:
@@ -335,7 +333,7 @@
request_msg.request_info,
)
if response_info.resource_state == 'pending':
- asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry),
+ asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry.key00.event_id),
loop = self._loop)
elif action == rwdts.QueryAction.DELETE:
diff --git a/rwlaunchpad/test/mano_ut.py b/rwlaunchpad/test/mano_ut.py
index 69a0d40..20e67a4 100755
--- a/rwlaunchpad/test/mano_ut.py
+++ b/rwlaunchpad/test/mano_ut.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,19 +16,18 @@
# limitations under the License.
#
-
+import argparse
import asyncio
+import gi
+import logging
import os
import sys
+import time
+import types
import unittest
import uuid
import xmlrunner
-import argparse
-import logging
-import time
-import types
-import gi
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwDts', '1.0')
gi.require_version('RwNsmYang', '1.0')
@@ -51,14 +50,22 @@
RwConfigAgentYang as rwcfg_agent,
RwlogMgmtYang
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
from gi.repository.RwTypes import RwStatus
import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
import rift.tasklets
import rift.test.dts
import rw_peas
+from rift.mano.utils.project import (
+ ManoProject,
+ DEFAULT_PROJECT,
+)
+PROJECT = 'default'
+
openstack_info = {
'username': 'pluto',
'password': 'mypasswd',
@@ -75,93 +82,103 @@
class XPaths(object):
@staticmethod
def nsd(k=None):
- return ("C,/nsd:nsd-catalog/nsd:nsd" +
- ("[nsd:id='{}']".format(k) if k is not None else ""))
+ return ("C,/project-nsd:nsd-catalog/project-nsd:nsd" +
+ ("[project-nsd:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def vld(k=None):
return ("C,/vld:vld-catalog/vld:vld" +
- ("[vld:id='{}']".format(k) if k is not None else ""))
+ ("[vld:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def vnfd(k=None):
- return ("C,/vnfd:vnfd-catalog/vnfd:vnfd" +
- ("[vnfd:id='{}']".format(k) if k is not None else ""))
+ return ("C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd" +
+ ("[project-vnfd:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def vnfr(k=None):
return ("D,/vnfr:vnfr-catalog/vnfr:vnfr" +
- ("[vnfr:id='{}']".format(k) if k is not None else ""))
+ ("[vnfr:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def vlr(k=None):
return ("D,/vlr:vlr-catalog/vlr:vlr" +
- ("[vlr:id='{}']".format(k) if k is not None else ""))
-
- @staticmethod
- def nsd_ref_count(k=None):
- return ("D,/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" +
- ("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else ""))
+ ("[vlr:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def vnfd_ref_count(k=None):
return ("D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" +
- ("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else ""))
+ ("[rw-nsr:nsd-id-ref={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def nsr_config(k=None):
return ("C,/nsr:ns-instance-config/nsr:nsr" +
- ("[nsr:id='{}']".format(k) if k is not None else ""))
+ ("[nsr:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def nsr_opdata(k=None):
return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
- ("[nsr:ns-instance-config-ref='{}']".format(k) if k is not None else ""))
+ ("[nsr:ns-instance-config-ref={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def nsr_config_status(k=None):
return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
- ("[nsr:ns-instance-config-ref='{}']/config_status".format(k) if k is not None else ""))
+ ("[nsr:ns-instance-config-ref={}]/config_status".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def cm_state(k=None):
- if k is None:
- return ("D,/rw-conman:cm-state/rw-conman:cm-nsr")
- else:
- return ("D,/rw-conman:cm-state/rw-conman:cm-nsr" +
- ("[rw-conman:id='{}']".format(k) if k is not None else ""))
+ return ("D,/rw-conman:cm-state/rw-conman:cm-nsr" +
+ ("[rw-conman:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def nsr_scale_group_instance(nsr_id=None, group_name=None, index=None):
return (("D,/nsr:ns-instance-opdata/nsr:nsr") +
- ("[nsr:ns-instance-config-ref='{}']".format(nsr_id) if nsr_id is not None else "") +
+ ("[nsr:ns-instance-config-ref={}]".format(quoted_key(nsr_id)) if nsr_id is not None else "") +
("/nsr:scaling-group-record") +
- ("[nsr:scaling-group-name-ref='{}']".format(group_name) if group_name is not None else "") +
+ ("[nsr:scaling-group-name-ref={}]".format(quoted_key(group_name)) if group_name is not None else "") +
("/nsr:instance") +
- ("[nsr:scaling-group-index-ref='{}']".format(index) if index is not None else ""))
+ ("[nsr:scaling-group-index-ref={}]".format(quoted_key(index)) if index is not None else ""))
@staticmethod
def nsr_scale_group_instance_config(nsr_id=None, group_name=None, index=None):
return (("C,/nsr:ns-instance-config/nsr:nsr") +
- ("[nsr:id='{}']".format(nsr_id) if nsr_id is not None else "") +
+ ("[nsr:id={}]".format(nsr_id) if nsr_id is not None else "") +
("/nsr:scaling-group") +
- ("[nsr:scaling-group-name-ref='{}']".format(group_name) if group_name is not None else "") +
+ ("[nsr:scaling-group-name-ref={}]".format(quoted_key(group_name)) if group_name is not None else "") +
("/nsr:instance") +
- ("[nsr:index='{}']".format(index) if index is not None else ""))
+ ("[nsr:index={}]".format(quoted_key(index)) if index is not None else ""))
+
+ @staticmethod
+ def cloud_account(k=None):
+ return ("C,/rw-cloud:cloud/rw-cloud:account" +
+ ("[rw-cloud:name={}]".format(quoted_key(k)) if k is not None else ""))
+
+ @staticmethod
+ def project(k=None):
+ return ("C,/rw-project:project" +
+ ("[rw-project:name={}]".format(quoted_key(k)) if k is not None else ""))
class ManoQuerier(object):
- def __init__(self, log, dts):
+ def __init__(self, log, dts, project):
self.log = log
self.dts = dts
+ self.project = project
+
+ def add_project(self, xpath):
+ return self.project.add_project(xpath)
@asyncio.coroutine
- def _read_query(self, xpath, do_trace=False):
- self.log.debug("Running XPATH read query: %s (trace: %s)", xpath, do_trace)
+ def _read_query(self, xpath, do_trace=False, project=True):
+ if project:
+ xp = self.add_project(xpath)
+ else:
+ xp = xpath
+ self.log.debug("Running XPATH read query: %s (trace: %s)", xp, do_trace)
flags = rwdts.XactFlag.MERGE
flags += rwdts.XactFlag.TRACE if do_trace else 0
res_iter = yield from self.dts.query_read(
- xpath, flags=flags
+ xp, flags=flags
)
results = []
@@ -173,6 +190,27 @@
return results
@asyncio.coroutine
+ def _delete_query(self, xpath, flags=0):
+ xp = self.add_project(xpath)
+ self.log.debug("Running XPATH delete query: %s (flags: %d)", xp, flags)
+ with self.dts.transaction() as xact:
+ yield from self.dts.query_delete(
+ xp,
+ flags
+ )
+
+ @asyncio.coroutine
+ def _update_query(self, xpath, msg, flags=0):
+ xp = self.add_project(xpath)
+ self.log.debug("Running XPATH update query: %s (flags: %d)", xp, flags)
+ with self.dts.transaction() as xact:
+ yield from self.dts.query_update(
+ xp,
+ flags,
+ msg
+ )
+
+ @asyncio.coroutine
def get_cm_state(self, nsr_id=None):
return (yield from self._read_query(XPaths.cm_state(nsr_id), False))
@@ -183,7 +221,6 @@
@asyncio.coroutine
def get_nsr_scale_group_instance_opdata(self, nsr_id=None, group_name=None, index=None):
return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name, index), False))
- #return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name), True))
@asyncio.coroutine
def get_nsr_configs(self, nsr_id=None):
@@ -202,75 +239,39 @@
return (yield from self._read_query(XPaths.vlr(vlr_id)))
@asyncio.coroutine
- def get_nsd_ref_counts(self, nsd_id=None):
- return (yield from self._read_query(XPaths.nsd_ref_count(nsd_id)))
-
- @asyncio.coroutine
def get_vnfd_ref_counts(self, vnfd_id=None):
return (yield from self._read_query(XPaths.vnfd_ref_count(vnfd_id)))
@asyncio.coroutine
def delete_nsr(self, nsr_id):
- with self.dts.transaction() as xact:
- yield from self.dts.query_delete(
- XPaths.nsr_config(nsr_id),
- 0
- #rwdts.XactFlag.TRACE,
- #rwdts.Flag.ADVISE,
- )
+ return (yield from self._delete_query(XPaths.nsr_config(nsr_id)))
@asyncio.coroutine
def delete_nsd(self, nsd_id):
- nsd_xpath = XPaths.nsd(nsd_id)
- self.log.debug("Attempting to delete NSD with path = %s", nsd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_delete(
- nsd_xpath,
- rwdts.XactFlag.ADVISE,
- )
+ return (yield from self._delete_query(XPaths.nsd(nsd_id),
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def delete_vnfd(self, vnfd_id):
- vnfd_xpath = XPaths.vnfd(vnfd_id)
- self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_delete(
- vnfd_xpath,
- rwdts.XactFlag.ADVISE,
- )
+ return (yield from self._delete_query(XPaths.vnfd(vnfd_id),
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def update_nsd(self, nsd_id, nsd_msg):
- nsd_xpath = XPaths.nsd(nsd_id)
- self.log.debug("Attempting to update NSD with path = %s", nsd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_update(
- nsd_xpath,
- rwdts.XactFlag.ADVISE,
- nsd_msg,
- )
+ return (yield from self._update_query(XPaths.nsd(nsd_id), nsd_msg,
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def update_vnfd(self, vnfd_id, vnfd_msg):
- vnfd_xpath = XPaths.vnfd(vnfd_id)
- self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_update(
- vnfd_xpath,
- rwdts.XactFlag.ADVISE,
- vnfd_msg,
- )
+ return (yield from self._update_query(XPaths.vnfd(vnfd_id), vnfd_msg,
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def update_nsr_config(self, nsr_id, nsr_msg):
- nsr_xpath = XPaths.nsr_config(nsr_id)
- self.log.debug("Attempting to update NSR with path = %s", nsr_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_update(
- nsr_xpath,
- rwdts.XactFlag.ADVISE|rwdts.XactFlag.REPLACE,
- nsr_msg,
- )
+ return (yield from self._update_query(
+ XPaths.nsr_config(nsr_id),
+ nsr_msg,
+ rwdts.XactFlag.ADVISE|rwdts.XactFlag.REPLACE))
class ManoTestCase(rift.test.dts.AbstractDTSTest):
@@ -365,44 +366,48 @@
vnfrs = yield from self.querier.get_vnfrs()
self.assertEqual(num_vnfrs, len(vnfrs))
- @asyncio.coroutine
- def verify_nsd_ref_count(self, nsd_id, num_ref):
- nsd_ref_counts = yield from self.querier.get_nsd_ref_counts(nsd_id)
- self.assertEqual(num_ref, nsd_ref_counts[0].instance_ref_count)
+
class DescriptorPublisher(object):
- def __init__(self, log, loop, dts):
+ def __init__(self, log, loop, dts, project):
self.log = log
self.loop = loop
self.dts = dts
+ self.project = project
self._registrations = []
@asyncio.coroutine
def publish(self, w_path, path, desc):
ready_event = asyncio.Event(loop=self.loop)
+ if 'rw-project' in path:
+ w_xp = w_path
+ xp = path
+ else:
+ w_xp = self.project.add_project(w_path)
+ xp = self.project.add_project(path)
@asyncio.coroutine
def on_ready(regh, status):
self.log.debug("Create element: %s, obj-type:%s obj:%s",
- path, type(desc), desc)
+ xp, type(desc), desc)
with self.dts.transaction() as xact:
- regh.create_element(path, desc, xact.xact)
- self.log.debug("Created element: %s, obj:%s", path, desc)
+ regh.create_element(xp, desc, xact.xact)
+ self.log.debug("Created element: %s, obj:%s", xp, desc)
ready_event.set()
handler = rift.tasklets.DTS.RegistrationHandler(
on_ready=on_ready
)
- self.log.debug("Registering path: %s, obj:%s", w_path, desc)
+ self.log.debug("Registering path: %s, obj:%s", w_xp, desc)
reg = yield from self.dts.register(
- w_path,
+ w_xp,
handler,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
)
self._registrations.append(reg)
- self.log.debug("Registered path : %s", w_path)
+ self.log.debug("Registered path : %s", w_xp)
yield from ready_event.wait()
return reg
@@ -413,23 +418,114 @@
reg.deregister()
-class PingPongNsrConfigPublisher(object):
- XPATH = "C,/nsr:ns-instance-config"
+class ProjectPublisher(object):
+ XPATH = "C,/rw-project:project"
- def __init__(self, log, loop, dts, ping_pong, cloud_account_name):
+ def __init__(self, log, loop, dts, project):
self.dts = dts
self.log = log
self.loop = loop
+ self.project = project
self.ref = None
- self.querier = ManoQuerier(log, dts)
+ self.querier = ManoQuerier(log, dts, project)
+ self.publisher = DescriptorPublisher(log, loop,
+ dts, project)
- self.nsr_config = rwnsryang.YangData_Nsr_NsInstanceConfig()
+ self._ready_event = asyncio.Event(loop=self.loop)
+ asyncio.ensure_future(self.register(), loop=loop)
- nsr = rwnsryang.YangData_Nsr_NsInstanceConfig_Nsr()
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def on_ready(regh, status):
+ self._ready_event.set()
+
+ self.log.debug("Registering path: %s", ProjectPublisher.XPATH)
+ self.reg = yield from self.dts.register(
+ ProjectPublisher.XPATH,
+ flags=rwdts.Flag.PUBLISHER,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_ready=on_ready,
+ ),
+ )
+
+ def deregister(self):
+ if self.reg is not None:
+ self.reg.deregister()
+
+ @asyncio.coroutine
+ def publish_project(self, config, xpath, xpath_wild):
+ # Publish project
+ self.log.debug("Publishing project path: %s - %s, type:%s, obj:%s",
+ xpath, xpath_wild, type(config), config)
+ yield from self.publisher.publish(xpath_wild, xpath, config)
+
+
+class CloudAccountPublisher(object):
+ XPATH = "C,/rw-cloud:cloud"
+
+ def __init__(self, log, loop, dts, project):
+ self.dts = dts
+ self.log = log
+ self.loop = loop
+ self.project = project
+ self.ref = None
+
+ self.querier = ManoQuerier(log, dts, project)
+ self.publisher = DescriptorPublisher(log, loop,
+ dts, project)
+
+ self.xpath = self.project.add_project(CloudAccountPublisher.XPATH)
+
+ self._ready_event = asyncio.Event(loop=self.loop)
+ asyncio.ensure_future(self.register(), loop=loop)
+
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def on_ready(regh, status):
+ self._ready_event.set()
+
+ self.log.debug("Registering path: %s", self.xpath)
+ self.reg = yield from self.dts.register(
+ self.xpath,
+ flags=rwdts.Flag.PUBLISHER,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_ready=on_ready,
+ ),
+ )
+
+ def deregister(self):
+ if self.reg is not None:
+ self.reg.deregister()
+
+ @asyncio.coroutine
+ def publish_account(self, account, xpath, xpath_wild):
+ # Publish cloud account
+ self.log.debug("Publishing cloud_account path: %s - %s, type:%s, obj:%s",
+ xpath, xpath_wild, type(account), account)
+ yield from self.publisher.publish(xpath_wild, xpath, account)
+
+
+class PingPongNsrConfigPublisher(object):
+ XPATH = "C,/nsr:ns-instance-config"
+
+ def __init__(self, log, loop, dts, ping_pong, cloud_account_name, project):
+ self.dts = dts
+ self.log = log
+ self.loop = loop
+ self.project = project
+ self.ref = None
+
+ self.querier = ManoQuerier(log, dts, project)
+ self.xpath = self.project.add_project(PingPongNsrConfigPublisher.XPATH)
+ self.nsr_config = rwnsryang.YangData_RwProject_Project_NsInstanceConfig()
+
+ nsr = rwnsryang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "ns1.{}".format(nsr.id)
- nsr.nsd = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_Nsd()
+ nsr.nsd = nsryang.YangData_RwProject_Project_NsInstanceConfig_Nsr_Nsd()
nsr.nsd.from_dict(ping_pong.ping_pong_nsd.nsd.as_dict())
nsr.cloud_account = cloud_account_name
@@ -439,8 +535,9 @@
#'cloud_account':'mock_account1'
})
- inputs = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
- inputs.xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(ping_pong.nsd_id)
+ inputs = nsryang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter()
+ inputs.xpath = self.project.add_project(
+ "/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id={}]/project-nsd:name".format(quoted_key(ping_pong.nsd_id)))
inputs.value = "inigo montoya"
fast_cpu = {'metadata_key': 'FASTCPU', 'metadata_value': 'True'}
@@ -488,9 +585,9 @@
def on_ready(regh, status):
self._ready_event.set()
- self.log.debug("Registering path: %s", PingPongNsrConfigPublisher.XPATH)
+ self.log.debug("Registering path: %s", self.xpath)
self.reg = yield from self.dts.register(
- PingPongNsrConfigPublisher.XPATH,
+ self.xpath,
flags=rwdts.Flag.PUBLISHER,
handler=rift.tasklets.DTS.RegistrationHandler(
on_ready=on_ready,
@@ -503,7 +600,7 @@
yield from self._ready_event.wait()
with self.dts.transaction() as xact:
self.reg.create_element(
- PingPongNsrConfigPublisher.XPATH,
+ self.xpath,
self.nsr_config,
xact=xact.xact,
)
@@ -520,7 +617,7 @@
})
with self.dts.transaction() as xact:
self.reg.update_element(
- PingPongNsrConfigPublisher.XPATH,
+ self.xpath,
self.nsr_config,
xact=xact.xact,
)
@@ -539,7 +636,7 @@
"cloud_type" : cloud_type,
construct_type : construct_value,
})
-
+
def create_vnfd_placement_group_map(self,
nsr,
@@ -555,21 +652,16 @@
"cloud_type" : cloud_type,
construct_type : construct_value,
})
-
-
+
+
@asyncio.coroutine
def delete_scale_group_instance(self, group_name, index):
self.log.debug("Deleting scale group %s instance %s", group_name, index)
#del self.nsr_config.nsr[0].scaling_group[0].instance[0]
- xpath = XPaths.nsr_scale_group_instance_config(self.nsr_config.nsr[0].id, group_name, index)
+ xpath = self.project.add_project(
+ XPaths.nsr_scale_group_instance_config(self.nsr_config.nsr[0].id,
+ group_name, index))
yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
- #with self.dts.transaction() as xact:
- # self.reg.update_element(
- # PingPongNsrConfigPublisher.XPATH,
- # self.nsr_config,
- # flags=rwdts.XactFlag.REPLACE,
- # xact=xact.xact,
- # )
def deregister(self):
if self.reg is not None:
@@ -617,10 +709,12 @@
def update_vnf_cloud_map(self,vnf_cloud_map):
self.log.debug("Modifying NSR to add VNF cloud account map: {}".format(vnf_cloud_map))
for vnf_index,cloud_acct in vnf_cloud_map.items():
- vnf_maps = [vnf_map for vnf_map in self.nsr_config.nsr[0].vnf_cloud_account_map if vnf_index == vnf_map.member_vnf_index_ref]
+ vnf_maps = [vnf_map for vnf_map in \
+ self.nsr_config.nsr[0].vnf_cloud_account_map \
+ if vnf_index == vnf_map.member_vnf_index_ref]
if vnf_maps:
vnf_maps[0].cloud_account = cloud_acct
- else:
+ else:
self.nsr_config.nsr[0].vnf_cloud_account_map.add().from_dict({
'member_vnf_index_ref':vnf_index,
'cloud_account':cloud_acct
@@ -628,13 +722,16 @@
class PingPongDescriptorPublisher(object):
- def __init__(self, log, loop, dts, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
+ def __init__(self, log, loop, dts, project,
+ num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
self.log = log
self.loop = loop
self.dts = dts
+ self.project = project
- self.querier = ManoQuerier(self.log, self.dts)
- self.publisher = DescriptorPublisher(self.log, self.loop, self.dts)
+ self.querier = ManoQuerier(self.log, self.dts, self.project)
+ self.publisher = DescriptorPublisher(self.log, self.loop,
+ self.dts, self.project)
self.ping_vnfd, self.pong_vnfd, self.ping_pong_nsd = \
ping_pong_nsd.generate_ping_pong_descriptors(
pingcount=1,
@@ -642,15 +739,9 @@
internal_vlr_count=num_internal_vlrs,
num_vnf_vms=2,
mano_ut=True,
- use_scale_group=True,
+ use_scale_group=False,
use_mon_params=False,
)
-
- self.config_dir = os.path.join(os.getenv('RIFT_ARTIFACTS'),
- "launchpad/libs",
- self.ping_pong_nsd.id,
- "config")
-
@property
def nsd_id(self):
return self.ping_pong_nsd.id
@@ -717,8 +808,6 @@
)
-
-
class ManoTestCase(rift.test.dts.AbstractDTSTest):
"""
DTS GI interface unittests
@@ -755,9 +844,9 @@
@staticmethod
def get_cal_account(account_type, account_name):
"""
- Creates an object for class RwcalYang.Clo
+ Creates an object for class RwcalYang.Cloud
"""
- account = rwcloudyang.CloudAccount()
+ account = rwcloudyang.YangData_RwProject_Project_Cloud_Account()
if account_type == 'mock':
account.name = account_name
account.account_type = "mock"
@@ -773,13 +862,33 @@
return account
@asyncio.coroutine
- def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"):
+ def configure_project(self, project=None):
+ if project is None:
+ project = self.project
+
+ proj_xpath = "C,{}/project-config".format(project.prefix)
+ self.log.info("Creating project: {} with {}".
+ format(proj_xpath, project.config.as_dict()))
+ xpath_wild = "C,/rw-project:project/project-config"
+ yield from self.project_publisher.publish_project(project.config,
+ proj_xpath,
+ xpath_wild)
+
+ @asyncio.coroutine
+ def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1", project=None):
account = self.get_cal_account(cloud_type, cloud_name)
- account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name)
self.log.info("Configuring cloud-account: %s", account)
- yield from dts.query_create(account_xpath,
- rwdts.XactFlag.ADVISE,
- account)
+ if project is None:
+ project = self.project
+ xpath = project.add_project(XPaths.cloud_account(account.name))
+ xpath_wild = project.add_project(XPaths.cloud_account())
+
+ # account_xpath = project.add_project(
+ # "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name={}]".format(quoted_key(cloud_name)))
+ # yield from dts.query_create(account_xpath,
+ # rwdts.XactFlag.ADVISE,
+ # account)
+ yield from self.cloud_publisher.publish_account(account, xpath, xpath_wild)
@asyncio.coroutine
def wait_tasklets(self):
@@ -789,22 +898,74 @@
self.log.debug("STARTING - %s", self.id())
self.tinfo = self.new_tinfo(self.id())
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
- self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop, self.dts)
- self.querier = ManoQuerier(self.log, self.dts)
+ self.project = ManoProject(self.log,
+ name=DEFAULT_PROJECT)
+ self.project1 = ManoProject(self.log,
+ name='test-1')
+ self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop,
+ self.dts, self.project)
+ self.querier = ManoQuerier(self.log, self.dts, self.project)
+ self.project_publisher = ProjectPublisher(
+ self.log,
+ loop,
+ self.dts,
+ self.project
+ )
+ self.cloud_publisher = CloudAccountPublisher(
+ self.log,
+ loop,
+ self.dts,
+ self.project
+ )
self.nsr_publisher = PingPongNsrConfigPublisher(
self.log,
loop,
self.dts,
self.ping_pong,
"mock_account",
+ self.project,
)
def test_create_nsr_record(self):
@asyncio.coroutine
+ def verify_projects(termination=False):
+ self.log.debug("Verifying projects = %s", XPaths.project())
+
+ accts = yield from self.querier._read_query(XPaths.project(),
+ project=False)
+ projs = []
+ for acc in accts:
+ self.log.debug("Project: {}".format(acc.as_dict()))
+ if acc.name not in projs:
+ projs.append(acc.name)
+ self.log.debug("Merged: {}".format(projs))
+ self.assertEqual(2, len(projs))
+
+ @asyncio.coroutine
+ def verify_cloud_accounts(termination=False):
+ self.log.debug("Verifying cloud accounts = %s", XPaths.cloud_account())
+
+ accts = yield from self.querier._read_query(XPaths.cloud_account())
+ self.assertEqual(2, len(accts))
+
+ accts = yield from self.querier._read_query(
+ self.project1.add_project(XPaths.cloud_account()), project=False)
+ self.assertEqual(1, len(accts))
+
+ accts = yield from self.querier._read_query(
+ "C,/rw-project:project/rw-cloud:cloud/rw-cloud:account",
+ project=False)
+ self.assertEqual(3, len(accts))
+
+ accts = yield from self.querier._read_query(
+ "C,/rw-project:project/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='mock_account']",
+ project=False)
+ self.assertEqual(2, len(accts))
+
+ @asyncio.coroutine
def verify_cm_state(termination=False, nsrid=None):
self.log.debug("Verifying cm_state path = %s", XPaths.cm_state(nsrid))
- #print("###>>> Verifying cm_state path:", XPaths.cm_state(nsrid))
loop_count = 10
loop_sleep = 10
@@ -878,7 +1039,7 @@
nsr_config = nsr_configs[0]
self.assertEqual(
- "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(self.ping_pong.nsd_id),
+ "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id={}]/project-nsd:name".format(quoted_key(self.ping_pong.nsd_id)),
nsr_config.input_parameter[0].xpath,
)
@@ -895,6 +1056,7 @@
nsr_opdata_l = yield from self.querier.get_nsr_opdatas(nsrid)
self.assertEqual(1, len(nsr_opdata_l))
nsr_opdata = nsr_opdata_l[0].as_dict()
+ self.log.debug("NSR opdata: {}".format(nsr_opdata))
if ("configured" == nsr_opdata['config_status']):
print("\n###>>> NSR Config Status 'configured' OK <<<###\n")
return
@@ -974,14 +1136,6 @@
self.log.debug("Sleeping for 10 seconds")
yield from asyncio.sleep(10, loop=self.loop)
- @asyncio.coroutine
- def verify_nsd_ref_count(termination):
- self.log.debug("Verifying nsd ref count= %s", XPaths.nsd_ref_count())
- res_iter = yield from self.dts.query_read(XPaths.nsd_ref_count())
-
- for i in res_iter:
- result = yield from i
- self.log.debug("Got nsd ref count record %s", result)
@asyncio.coroutine
def verify_vnfd_ref_count(termination):
@@ -1024,13 +1178,15 @@
#yield from verify_vlr_record(termination)
yield from verify_nsr_opdata(termination)
yield from verify_nsr_config(termination)
- yield from verify_nsd_ref_count(termination)
yield from verify_vnfd_ref_count(termination)
# Config Manager
yield from verify_cm_state(termination, nsrid)
yield from verify_nsr_config_status(termination, nsrid)
+ yield from verify_cloud_accounts(termination)
+ yield from verify_projects(termination)
+
@asyncio.coroutine
def verify_scale_instance(index):
self.log.debug("Verifying scale record path = %s, Termination=%d",
@@ -1074,12 +1230,20 @@
def run_test():
yield from self.wait_tasklets()
+ yield from self.configure_project()
+ yield from self.configure_project(project=self.project1)
cloud_type = "mock"
yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account")
yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account1")
+ yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account",
+ project=self.project1)
+
+ yield from verify_cloud_accounts()
+ yield from verify_projects()
yield from self.ping_pong.publish_desciptors()
+ return  # NOTE(review): early return skips all remaining NSR test steps — remove before merge?
# Attempt deleting VNFD not in use
yield from self.ping_pong.update_ping_vnfd()
diff --git a/rwlaunchpad/test/mgmt_recovery.py b/rwlaunchpad/test/mgmt_recovery.py
index 29f0ab0..c2392c2 100755
--- a/rwlaunchpad/test/mgmt_recovery.py
+++ b/rwlaunchpad/test/mgmt_recovery.py
@@ -266,7 +266,6 @@
ConfigManagerTasklet(),
UIServer(),
RedisServer(),
- rift.vcs.RestPortForwardTasklet(),
rift.vcs.RestconfTasklet(),
rift.vcs.RiftCli(),
rift.vcs.uAgentTasklet(),
@@ -275,7 +274,7 @@
standby_procs = [
RedisServer(),
- rift.vcs.uAgentTasklet(mode_active=False),
+ rift.vcs.uAgentTasklet()
]
restart_procs = [
@@ -358,7 +357,7 @@
# Create the prepared system from the demo
system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args,
- northbound_listing="cli_launchpad_schema_listing.txt",
+ northbound_listing=["platform_schema_listing.txt", "platform_mgmt_schema_listing.txt", "cli_launchpad_schema_listing.txt"],
netconf_trace_override=True)
confd_ip = socket.gethostbyname(socket.gethostname())
diff --git a/rwlaunchpad/test/pytest/lp_kt_utm_test.py b/rwlaunchpad/test/pytest/lp_kt_utm_test.py
index 0a8d6ba..584d9b9 100644
--- a/rwlaunchpad/test/pytest/lp_kt_utm_test.py
+++ b/rwlaunchpad/test/pytest/lp_kt_utm_test.py
@@ -41,10 +41,10 @@
gi.require_version('RwNsrYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwConmanYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwBaseYang,
RwCloudYang,
@@ -54,7 +54,7 @@
RwNsrYang,
RwResourceMgrYang,
RwConmanYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
VldYang,
)
@@ -180,14 +180,14 @@
pass
-def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1", project="default"):
logger.info("Waiting for onboard trans_id %s to complete",
transaction_id)
start_time = time.time()
while (time.time() - start_time) < timeout_secs:
r = requests.get(
- 'http://{host}:4567/api/upload/{t_id}/state'.format(
- host=host, t_id=transaction_id
+ 'http://{host}:8008/api/operational/project/{proj}/create-jobs/job/{t_id}'.format(
+ host=host, proj=project, t_id=transaction_id
)
)
state = r.json()
@@ -206,7 +206,7 @@
raise DescriptorOnboardError(state)
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "UTM-only"
nsr.short_name = "UTM-only"
@@ -247,7 +247,7 @@
cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
- pools = RwResourceMgrYang.ResourcePools.from_dict({
+ pools = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools.from_dict({
"pools": [{ "name": "vm_pool_a",
"resource_type": "compute",
"pool_type" : "dynamic"},
@@ -255,29 +255,14 @@
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
- def test_configure_resource_orchestrator(self, so_proxy):
- cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
- 'ro_port' : 2022,
- 'ro_username' : 'admin',
- 'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
-
- def test_configure_service_orchestrator(self, nsm_proxy):
- cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
- 'cm_port' : 2022,
- 'cm_username' : 'admin',
- 'cm_password' : 'admin'})
- nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
-
-
def test_onboard_ktutm_vnfd(self, logger, vnfd_proxy, kt_utm_vnfd_package_file):
logger.info("Onboarding kt_utm_vnfd package: %s", kt_utm_vnfd_package_file)
trans_id = upload_descriptor(logger, kt_utm_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should only be a single vnfd"
vnfd = vnfds[0]
@@ -288,19 +273,19 @@
trans_id = upload_descriptor(logger, utm_only_nsd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
def test_instantiate_utm_only_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
nsr = create_nsr_from_nsd_id(nsd.id)
- nsr_proxy.merge_config('/ns-instance-config', nsr)
+ nsr_proxy.merge_config('/rw-project:project[rw-project:name="default"]/ns-instance-config', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
diff --git a/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py b/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py
index 705565b..19b637d 100644
--- a/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py
+++ b/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
"""
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -39,13 +39,13 @@
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwNsmYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwNsrYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwConmanYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwBaseYang,
RwCloudYang,
@@ -55,7 +55,7 @@
RwNsrYang,
RwResourceMgrYang,
RwConmanYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
VldYang,
)
@@ -197,14 +197,14 @@
pass
-def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1", project="default"):
logger.info("Waiting for onboard trans_id %s to complete",
transaction_id)
start_time = time.time()
while (time.time() - start_time) < timeout_secs:
r = requests.get(
- 'http://{host}:4567/api/upload/{t_id}/state'.format(
- host=host, t_id=transaction_id
+ 'http://{host}:8008/api/operational/project/{proj}/create-jobs/job/{t_id}/'.format(
+ host=host, proj=project, t_id=transaction_id
)
)
state = r.json()
@@ -223,7 +223,7 @@
raise DescriptorOnboardError(state)
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "UTM-WIMS"
nsr.short_name = "UTM-WIMS"
@@ -261,10 +261,10 @@
cloud_account.openstack.tenant = 'demo'
cloud_account.openstack.mgmt_network = 'private'
- cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+ cloud_proxy.merge_config("/rw-project:project/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
- pools = RwResourceMgrYang.ResourcePools.from_dict({
+ pools = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools.from_dict({
"pools": [{ "name": "vm_pool_a",
"resource_type": "compute",
"pool_type" : "dynamic"},
@@ -272,29 +272,14 @@
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
- def test_configure_resource_orchestrator(self, so_proxy):
- cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
- 'ro_port' : 2022,
- 'ro_username' : 'admin',
- 'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
-
- def test_configure_service_orchestrator(self, nsm_proxy):
- cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
- 'cm_port' : 2022,
- 'cm_username' : 'admin',
- 'cm_password' : 'admin'})
- nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
-
-
def test_onboard_ktutm_vnfd(self, logger, vnfd_proxy, kt_utm_vnfd_package_file):
logger.info("Onboarding kt_utm_vnfd package: %s", kt_utm_vnfd_package_file)
trans_id = upload_descriptor(logger, kt_utm_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should only be a single vnfd"
vnfd = vnfds[0]
@@ -305,7 +290,7 @@
trans_id = upload_descriptor(logger, kt_wims_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should only be two vnfd"
assert "kt_wims_vnfd" in [vnfds[0].name, vnfds[1].name]
@@ -315,19 +300,19 @@
trans_id = upload_descriptor(logger, utm_wims_nsd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
def test_instantiate_utm_wims_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
nsr = create_nsr_from_nsd_id(nsd.id)
- nsr_proxy.merge_config('/ns-instance-config', nsr)
+ nsr_proxy.merge_config('/rw-project:project[rw-project:name="default"]/ns-instance-config', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
diff --git a/rwlaunchpad/test/pytest/lp_test.py b/rwlaunchpad/test/pytest/lp_test.py
index b987b35..8600d5d 100644
--- a/rwlaunchpad/test/pytest/lp_test.py
+++ b/rwlaunchpad/test/pytest/lp_test.py
@@ -22,42 +22,42 @@
@brief Launchpad Module Test
"""
+import datetime
+import gi
import json
import logging
import os
import pytest
-import shlex
import requests
+import shlex
import subprocess
import time
import uuid
-import datetime
-import gi
gi.require_version('RwBaseYang', '1.0')
gi.require_version('RwCloudYang', '1.0')
-gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwlogMgmtYang', '1.0')
gi.require_version('RwNsmYang', '1.0')
-gi.require_version('RwNsmYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwConmanYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwBaseYang,
RwCloudYang,
- RwIwpYang,
RwlogMgmtYang,
RwNsmYang,
RwNsrYang,
RwResourceMgrYang,
RwConmanYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
VldYang,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
logging.basicConfig(level=logging.DEBUG)
@@ -76,11 +76,6 @@
@pytest.fixture(scope='module')
-def iwp_proxy(request, mgmt_session):
- return mgmt_session.proxy(RwIwpYang)
-
-
-@pytest.fixture(scope='module')
def rwlog_mgmt_proxy(request, mgmt_session):
return mgmt_session.proxy(RwlogMgmtYang)
@@ -172,7 +167,7 @@
def create_nsr_from_nsd_id(nsd_id):
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "pingpong_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
nsr.short_name = "nsr_short_name"
@@ -181,8 +176,8 @@
nsr.admin_status = "ENABLED"
nsr.cloud_account = "openstack"
- param = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
- param.xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:vendor'
+ param = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter()
+ param.xpath = '/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/project-nsd:vendor'
param.value = "rift-o-matic"
nsr.input_parameter.append(param)
@@ -208,14 +203,14 @@
pass
-def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1", project="default"):
logger.info("Waiting for onboard trans_id %s to complete",
transaction_id)
start_time = time.time()
while (time.time() - start_time) < timeout_secs:
r = requests.get(
- 'http://{host}:4567/api/upload/{t_id}/state'.format(
- host=host, t_id=transaction_id
+ 'http://{host}:8008/api/operational/project/{proj}/create-jobs/job/{t_id}'.format(
+ host=host, proj=project, t_id=transaction_id
)
)
state = r.json()
@@ -251,7 +246,7 @@
rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging)
def test_configure_cloud_account(self, cloud_proxy, logger):
- cloud_account = RwCloudYang.CloudAccount()
+ cloud_account = RwCloudYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
# cloud_account.name = "cloudsim_proxy"
# cloud_account.account_type = "cloudsim_proxy"
cloud_account.name = "openstack"
@@ -269,7 +264,7 @@
trans_id = upload_descriptor(logger, ping_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should only be a single vnfd"
vnfd = vnfds[0]
@@ -280,7 +275,7 @@
trans_id = upload_descriptor(logger, pong_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should be two vnfds"
assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name]
@@ -290,20 +285,20 @@
trans_id = upload_descriptor(logger, ping_pong_nsd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
assert nsd.name == "ping_pong_nsd"
def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
nsr = create_nsr_from_nsd_id(nsd.id)
- rwnsr_proxy.merge_config('/ns-instance-config', nsr)
+ rwnsr_proxy.merge_config('/rw-project:project[rw-project:name="default"]/ns-instance-config', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
@@ -383,8 +378,8 @@
# assert False, "Did not find all ping and pong component in time"
#def test_terminate_ping_pong_ns(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- # nsr_configs = nsr_proxy.get_config('/ns-instance-config')
+ # nsr_configs = nsr_proxy.get_config('/rw-project:project[rw-project:name="default"]/ns-instance-config')
# nsr = nsr_configs.nsr[0]
# nsr_id = nsr.id
- # nsr_configs = nsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(nsr_id))
+ # nsr_configs = nsr_proxy.delete_config("/ns-instance-config/nsr[id={}]".format(quoted_key(nsr_id)))
diff --git a/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py
index 16a8990..4583a4a 100644
--- a/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py
+++ b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
"""
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,7 +35,7 @@
import gi
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
@@ -44,7 +44,19 @@
-from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang
+from gi.repository import (
+ RwIwpYang,
+ ProjectNsdYang as NsdYang,
+ NsrYang,
+ RwNsrYang,
+ VldYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwCloudYang,
+ RwBaseYang,
+ RwResourceMgrYang,
+ RwConmanYang,
+ RwNsmYang
+)
logging.basicConfig(level=logging.DEBUG)
@@ -172,7 +184,7 @@
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "TG-2Vrouter-TS EPA"
nsr.short_name = "TG-2Vrouter-TS EPA"
@@ -201,14 +213,14 @@
pass
-def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1", project='default'):
logger.info("Waiting for onboard trans_id %s to complete",
transaction_id)
start_time = time.time()
while (time.time() - start_time) < timeout_secs:
r = requests.get(
- 'http://{host}:4567/api/upload/{t_id}/state'.format(
- host=host, t_id=transaction_id
+ 'http://{host}:8008/api/operational/project/{proj}/create-jobs/job/{t_id}'.format(
+ host=host, proj=project, t_id=transaction_id
)
)
state = r.json()
@@ -240,10 +252,10 @@
cloud_account.openstack.tenant = 'demo'
cloud_account.openstack.mgmt_network = 'private'
- cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+ cloud_proxy.merge_config("/rw-project:project/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
- pools = RwResourceMgrYang.ResourcePools.from_dict({
+ pools = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools.from_dict({
"pools": [{ "name": "vm_pool_a",
"resource_type": "compute",
"pool_type" : "dynamic"},
@@ -251,29 +263,14 @@
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
- def test_configure_resource_orchestrator(self, so_proxy):
- cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
- 'ro_port' : 2022,
- 'ro_username' : 'admin',
- 'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
-
- def test_configure_service_orchestrator(self, nsm_proxy):
- cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
- 'cm_port' : 2022,
- 'cm_username' : 'admin',
- 'cm_password' : 'admin'})
- nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
-
-
def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file):
logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file)
trans_id = upload_descriptor(logger, tg_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should be one vnfds"
assert "trafgen_vnfd" in [vnfds[0].name]
@@ -283,7 +280,7 @@
trans_id = upload_descriptor(logger, vrouter_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should be two vnfds"
assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name]
@@ -293,7 +290,7 @@
trans_id = upload_descriptor(logger, ts_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 3, "There should be three vnfds"
assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name]
@@ -303,7 +300,7 @@
trans_id = upload_descriptor(logger, tg_2vrouter_ts_nsd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
@@ -311,13 +308,13 @@
assert nsd.short_name == "tg_2vrouter_ts_nsd"
def test_instantiate_tg_2vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
nsr = create_nsr_from_nsd_id(nsd.id)
- nsr_proxy.merge_config('/ns-instance-config', nsr)
+ nsr_proxy.merge_config('/rw-project:project[rw-project:name="default"]/ns-instance-config', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
diff --git a/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py
index ed00a25..f22c88f 100644
--- a/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py
+++ b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
"""
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,16 +35,26 @@
import gi
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwConmanYang', '1.0')
gi.require_version('RwNsmYang', '1.0')
-
-
-from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang
+from gi.repository import (
+ RwIwpYang,
+ ProjectNsdYang as NsdYang,
+ NsrYang,
+ RwNsrYang,
+ VldYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwCloudYang,
+ RwBaseYang,
+ RwResourceMgrYang,
+ RwConmanYang,
+ RwNsmYang
+ )
logging.basicConfig(level=logging.DEBUG)
@@ -172,7 +182,7 @@
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "TG-2Vrouter-TS EPA"
nsr.short_name = "TG-2Vrouter-TS EPA"
@@ -201,14 +211,14 @@
pass
-def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1", project="default"):
logger.info("Waiting for onboard trans_id %s to complete",
transaction_id)
start_time = time.time()
while (time.time() - start_time) < timeout_secs:
r = requests.get(
- 'http://{host}:4567/api/upload/{t_id}/state'.format(
- host=host, t_id=transaction_id
+ 'http://{host}:8008/api/operational/project/{proj}/create-jobs/job/{t_id}'.format(
+ host=host, proj=project, t_id=transaction_id
)
)
state = r.json()
@@ -240,10 +250,10 @@
cloud_account.openstack.tenant = 'demo'
cloud_account.openstack.mgmt_network = 'private'
- cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+ cloud_proxy.merge_config("/rw-project:project/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
- pools = RwResourceMgrYang.ResourcePools.from_dict({
+ pools = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools.from_dict({
"pools": [{ "name": "vm_pool_a",
"resource_type": "compute",
"pool_type" : "dynamic"},
@@ -251,29 +261,14 @@
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
- def test_configure_resource_orchestrator(self, so_proxy):
- cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
- 'ro_port' : 2022,
- 'ro_username' : 'admin',
- 'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
-
- def test_configure_service_orchestrator(self, nsm_proxy):
- cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
- 'cm_port' : 2022,
- 'cm_username' : 'admin',
- 'cm_password' : 'admin'})
- nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
-
-
def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file):
logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file)
trans_id = upload_descriptor(logger, tg_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should be one vnfds"
assert "trafgen_vnfd" in [vnfds[0].name]
@@ -283,7 +278,7 @@
trans_id = upload_descriptor(logger, vrouter_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should be two vnfds"
assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name]
@@ -293,7 +288,7 @@
trans_id = upload_descriptor(logger, ts_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 3, "There should be three vnfds"
assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name]
@@ -303,7 +298,7 @@
trans_id = upload_descriptor(logger, tg_2vrouter_ts_nsd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
@@ -311,13 +306,13 @@
assert nsd.short_name == "tg_2vrouter_ts_nsd"
def test_instantiate_tg_2vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
nsr = create_nsr_from_nsd_id(nsd.id)
- nsr_proxy.merge_config('/ns-instance-config', nsr)
+ nsr_proxy.merge_config('/rw-project:project[rw-project:name="default"]/ns-instance-config', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
diff --git a/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py b/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py
index 4d6e345..60c20a3 100644
--- a/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py
+++ b/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
"""
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,7 +35,7 @@
import gi
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
@@ -43,7 +43,19 @@
gi.require_version('RwNsmYang', '1.0')
-from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang
+from gi.repository import (
+ RwIwpYang,
+ ProjectNsdYang,
+ NsrYang,
+ RwNsrYang,
+ VldYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwCloudYang,
+ RwBaseYang,
+ RwResourceMgrYang,
+ RwConmanYang,
+ RwNsmYang
+ )
logging.basicConfig(level=logging.DEBUG)
@@ -171,7 +183,7 @@
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "TG-Vrouter-TS-EPA-SRIOV"
nsr.short_name = "TG-Vrouter-TS-EPA-SRIOV"
@@ -200,14 +212,14 @@
pass
-def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1", project="default"):
logger.info("Waiting for onboard trans_id %s to complete",
transaction_id)
start_time = time.time()
while (time.time() - start_time) < timeout_secs:
r = requests.get(
- 'http://{host}:4567/api/upload/{t_id}/state'.format(
- host=host, t_id=transaction_id
+ 'http://{host}:8008/api/operational/project/{proj}/create-jobs/job/{t_id}'.format(
+ host=host, proj=project, t_id=transaction_id
)
)
state = r.json()
@@ -239,10 +251,10 @@
cloud_account.openstack.tenant = 'demo'
cloud_account.openstack.mgmt_network = 'private'
- cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+ cloud_proxy.merge_config("/rw-project:project/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
- pools = RwResourceMgrYang.ResourcePools.from_dict({
+ pools = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools.from_dict({
"pools": [{ "name": "vm_pool_a",
"resource_type": "compute",
"pool_type" : "dynamic"},
@@ -250,29 +262,14 @@
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
- def test_configure_resource_orchestrator(self, so_proxy):
- cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
- 'ro_port' : 2022,
- 'ro_username' : 'admin',
- 'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
-
- def test_configure_service_orchestrator(self, nsm_proxy):
- cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
- 'cm_port' : 2022,
- 'cm_username' : 'admin',
- 'cm_password' : 'admin'})
- nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
-
-
def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file):
logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file)
trans_id = upload_descriptor(logger, tg_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should be one vnfds"
assert "trafgen_vnfd" in [vnfds[0].name]
@@ -282,7 +279,7 @@
trans_id = upload_descriptor(logger, vrouter_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should be two vnfds"
assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name]
@@ -292,7 +289,7 @@
trans_id = upload_descriptor(logger, ts_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 3, "There should be three vnfds"
assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name]
@@ -302,20 +299,20 @@
trans_id = upload_descriptor(logger, tg_vrouter_ts_nsd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
assert nsd.name == "tg_vrouter_ts_nsd"
def test_instantiate_tg_vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
nsr = create_nsr_from_nsd_id(nsd.id)
- nsr_proxy.merge_config('/ns-instance-config', nsr)
+ nsr_proxy.merge_config('/rw-project:project[rw-project:name="default"]/ns-instance-config', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
diff --git a/rwlaunchpad/test/racfg/lprecovery_test.racfg b/rwlaunchpad/test/racfg/lprecovery_test.racfg
index 43e07aa..7ce907d 100644
--- a/rwlaunchpad/test/racfg/lprecovery_test.racfg
+++ b/rwlaunchpad/test/racfg/lprecovery_test.racfg
@@ -5,7 +5,7 @@
"test_description":"Test targeting launchpad recovery feature",
"run_as_root": true,
"status":"broken",
- "keywords":["nightly","smoke"],
+ "keywords":["nightly"],
"timelimit": 4800,
"networks":[],
"vms":[
diff --git a/rwlaunchpad/test/tosca_ut.py b/rwlaunchpad/test/tosca_ut.py
index 40efe41..31a9276 100755
--- a/rwlaunchpad/test/tosca_ut.py
+++ b/rwlaunchpad/test/tosca_ut.py
@@ -26,6 +26,10 @@
import unittest
import xmlrunner
+# Set RIFT_VAR_ROOT, if not already set, for unit test execution.
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
+
import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
from rift.mano.utils.compare_desc import CompareDescShell
diff --git a/rwlaunchpad/test/utest_nsr_handler.py b/rwlaunchpad/test/utest_nsr_handler.py
index ffab929..40049b3 100755
--- a/rwlaunchpad/test/utest_nsr_handler.py
+++ b/rwlaunchpad/test/utest_nsr_handler.py
@@ -18,13 +18,13 @@
import argparse
import asyncio
+import gi
import logging
import os
import sys
import time
import unittest
import uuid
-
import xmlrunner
import gi.repository.RwDts as rwdts
@@ -38,6 +38,9 @@
import rift.tasklets
import rift.test.dts
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
import mano_ut
@@ -47,8 +50,8 @@
class NsrDtsHandler(object):
""" The network service DTS handler """
- NSR_XPATH = "C,/nsr:ns-instance-config/nsr:nsr"
- SCALE_INSTANCE_XPATH = "C,/nsr:ns-instance-config/nsr:nsr/nsr:scaling-group/nsr:instance"
+ NSR_XPATH = "C,/rw-project:project/nsr:ns-instance-config/nsr:nsr"
+ SCALE_INSTANCE_XPATH = "C,/rw-project:project/nsr:ns-instance-config/nsr:nsr/nsr:scaling-group/nsr:instance"
def __init__(self, dts, log, loop, nsm):
self._dts = dts
@@ -66,12 +69,12 @@
def get_scale_group_instances(self, nsr_id, group_name):
def nsr_id_from_keyspec(ks):
- nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+ nsr_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
nsr_id = nsr_path_entry.key00.id
return nsr_id
def group_name_from_keyspec(ks):
- group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+ group_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
group_name = group_path_entry.key00.scaling_group_name_ref
return group_name
@@ -95,12 +98,12 @@
""" Register for Nsr create/update/delete/read requests from dts """
def nsr_id_from_keyspec(ks):
- nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+ nsr_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
nsr_id = nsr_path_entry.key00.id
return nsr_id
def group_name_from_keyspec(ks):
- group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+ group_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
group_name = group_path_entry.key00.scaling_group_name_ref
return group_name
@@ -327,16 +330,16 @@
class XPaths(object):
@staticmethod
def nsr_config(nsr_id=None):
- return ("C,/nsr:ns-instance-config/nsr:nsr" +
- ("[nsr:id='{}']".format(nsr_id) if nsr_id is not None else ""))
+ return ("C,/rw-project:project/nsr:ns-instance-config/nsr:nsr" +
+ ("[nsr:id={}]".format(quoted_key(nsr_id)) if nsr_id is not None else ""))
def scaling_group_instance(nsr_id, group_name, instance_id):
- return ("C,/nsr:ns-instance-config/nsr:nsr" +
- "[nsr:id='{}']".format(nsr_id) +
+ return ("C,/rw-project:project/nsr:ns-instance-config/nsr:nsr" +
+ "[nsr:id={}]".format(quoted_key(nsr_id)) +
"/nsr:scaling-group" +
- "[nsr:scaling-group-name-ref='{}']".format(group_name) +
+ "[nsr:scaling-group-name-ref={}]".format(quoted_key(group_name)) +
"/nsr:instance" +
- "[nsr:id='{}']".format(instance_id)
+ "[nsr:id={}]".format(quoted_key(instance_id))
)
@@ -377,7 +380,7 @@
block = xact.block_create()
block.add_query_update(
XPaths.nsr_config(nsr1_uuid),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr(id=nsr1_uuid, name="fu"),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr(id=nsr1_uuid, name="fu"),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
@@ -388,7 +391,7 @@
block = xact.block_create()
block.add_query_update(
XPaths.scaling_group_instance(nsr1_uuid, "group", 1234),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=1234),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=1234),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
@@ -409,7 +412,7 @@
block = xact.block_create()
block.add_query_create(
XPaths.scaling_group_instance(nsr1_uuid, "group", 12345),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=12345),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=12345),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
@@ -427,7 +430,7 @@
block = xact.block_create()
block.add_query_update(
XPaths.nsr_config(nsr2_uuid),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr(id=nsr2_uuid, name="fu2"),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr(id=nsr2_uuid, name="fu2"),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
diff --git a/rwlaunchpad/test/utest_ro_account.py b/rwlaunchpad/test/utest_ro_account.py
index aa485ef..6c08fe8 100755
--- a/rwlaunchpad/test/utest_ro_account.py
+++ b/rwlaunchpad/test/utest_ro_account.py
@@ -20,22 +20,30 @@
import types
import unittest
import uuid
+import os
+import xmlrunner
+
+# Set RIFT_VAR_ROOT, if not already set, for unit test execution.
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
import rift.test.dts
import rift.tasklets.rwnsmtasklet.cloud as cloud
+import rift.tasklets.rwnsmtasklet.rwnsmplugin as rwnsmplugin
import rift.tasklets.rwnsmtasklet.openmano_nsm as openmano_nsm
+from rift.mano.utils.project import ManoProject
import rw_peas
import gi
-gi.require_version('RwDtsYang', '1.0')
+gi.require_version('RwDts', '1.0')
from gi.repository import (
- RwLaunchpadYang as launchpadyang,
+ RwRoAccountYang as roaccountyang,
RwDts as rwdts,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwVnfrYang,
RwNsrYang,
- RwNsdYang,
- VnfrYang
+ RwProjectNsdYang as RwNsdYang,
+ VnfrYang,
)
@@ -44,10 +52,17 @@
self.log = log
self.loop = loop
self.dts = dts
-
self._registrations = []
@asyncio.coroutine
+ def update(self, xpath, desc):
+ self._registrations[-1].update_element(xpath, desc)
+
+ @asyncio.coroutine
+ def delete(self, xpath):
+ self._registrations[-1].delete_element(xpath)
+
+ @asyncio.coroutine
def publish(self, w_path, path, desc):
ready_event = asyncio.Event(loop=self.loop)
@@ -65,11 +80,13 @@
)
self.log.debug("Registering path: %s, obj:%s", w_path, desc)
+
reg = yield from self.dts.register(
w_path,
handler,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
)
+
self._registrations.append(reg)
self.log.debug("Registered path : %s", w_path)
yield from ready_event.wait()
@@ -84,7 +101,7 @@
class RoAccountDtsTestCase(rift.test.dts.AbstractDTSTest):
@classmethod
def configure_schema(cls):
- return launchpadyang.get_schema()
+ return roaccountyang.get_schema()
@classmethod
def configure_timeout(cls):
@@ -94,6 +111,7 @@
self.log.debug("STARTING - %s", test_id)
self.tinfo = self.new_tinfo(str(test_id))
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+ self.project = ManoProject(self.log)
self.tinfo_sub = self.new_tinfo(str(test_id) + "_sub")
self.dts_sub = rift.tasklets.DTS(self.tinfo_sub, self.schema, self.loop)
@@ -105,59 +123,44 @@
@rift.test.dts.async_test
def test_orch_account_create(self):
- orch = cloud.ROAccountPluginSelector(self.dts, self.log, self.loop, None)
-
- yield from orch.register()
-
+ ro_cfg_sub = cloud.ROAccountConfigSubscriber(self.dts, self.log, self.loop, self.project, None)
+ yield from ro_cfg_sub.register()
+
+ ro_plugin = ro_cfg_sub.get_ro_plugin(account_name=None)
# Test if we have a default plugin in case no RO is specified.
- assert type(orch.ro_plugin) is cloud.RwNsPlugin
- mock_orch_acc = launchpadyang.ResourceOrchestrator.from_dict(
- {'name': 'rift-ro', 'account_type': 'rift_ro', 'rift_ro': {'rift_ro': True}})
+ assert type(ro_plugin) is rwnsmplugin.RwNsPlugin
# Test rift-ro plugin CREATE
- w_xpath = "C,/rw-launchpad:resource-orchestrator"
- xpath = w_xpath
- yield from self.publisher.publish(w_xpath, xpath, mock_orch_acc)
- yield from asyncio.sleep(5, loop=self.loop)
-
- assert type(orch.ro_plugin) is cloud.RwNsPlugin
+ w_xpath = self.project.add_project("C,/rw-ro-account:ro-account/rw-ro-account:account")
+ xpath = w_xpath + "[rw-ro-account:name='openmano']"
# Test Openmano plugin CREATE
- mock_orch_acc = launchpadyang.ResourceOrchestrator.from_dict(
+ mock_orch_acc = roaccountyang.YangData_RwProject_Project_RoAccount_Account.from_dict(
{'name': 'openmano',
- 'account_type': 'openmano',
+ 'ro_account_type': 'openmano',
'openmano': {'tenant_id': "abc",
"port": 9999,
"host": "10.64.11.77"}})
+
yield from self.publisher.publish(w_xpath, xpath, mock_orch_acc)
yield from asyncio.sleep(5, loop=self.loop)
-
- assert type(orch.ro_plugin) is openmano_nsm.OpenmanoNsPlugin
- assert orch.ro_plugin._cli_api._port == mock_orch_acc.openmano.port
- assert orch.ro_plugin._cli_api._host == mock_orch_acc.openmano.host
+
+ ro_plugin = ro_cfg_sub.get_ro_plugin(account_name='openmano')
+ assert type(ro_plugin) is openmano_nsm.OpenmanoNsPlugin
# Test update
mock_orch_acc.openmano.port = 9789
mock_orch_acc.openmano.host = "10.64.11.78"
- yield from self.dts.query_update("C,/rw-launchpad:resource-orchestrator",
- rwdts.XactFlag.ADVISE, mock_orch_acc)
- assert orch.ro_plugin._cli_api._port == mock_orch_acc.openmano.port
- assert orch.ro_plugin._cli_api._host == mock_orch_acc.openmano.host
+ yield from self.publisher.update(xpath, mock_orch_acc)
+ yield from asyncio.sleep(5, loop=self.loop)
- # Test update when a live instance exists
- # Exception should be thrown
- orch.handle_nsr(None, rwdts.QueryAction.CREATE)
- mock_orch_acc.openmano.port = 9788
+ # Since an update means a delete followed by an insert, get the new ro_plugin.
+ ro_plugin = ro_cfg_sub.get_ro_plugin(account_name='openmano')
+ assert ro_plugin._cli_api._port == mock_orch_acc.openmano.port
+ assert ro_plugin._cli_api._host == mock_orch_acc.openmano.host
- with self.assertRaises(Exception):
- yield from self.dts.query_update("C,/rw-launchpad:resource-orchestrator",
- rwdts.XactFlag.ADVISE, mock_orch_acc)
-
- # Test delete
- yield from self.dts.query_delete("C,/rw-launchpad:resource-orchestrator",
- flags=rwdts.XactFlag.ADVISE)
- assert orch.ro_plugin == None
-
+ # TODO: Test delete still to be implemented; currently blocked by DTS issues.
+ # Use DescriptorPublisher delete for deletion
def main(argv=sys.argv[1:]):
@@ -166,8 +169,8 @@
# when this is called from the interpreter).
unittest.main(
argv=[__file__] + argv,
- testRunner=None#xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+ testRunner=xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
)
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
diff --git a/rwlaunchpad/test/utest_rwmonitor.py b/rwlaunchpad/test/utest_rwmonitor.py
index 46c33b3..b69815f 100755
--- a/rwlaunchpad/test/utest_rwmonitor.py
+++ b/rwlaunchpad/test/utest_rwmonitor.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -60,6 +60,7 @@
UnknownAccountError,
)
import rw_peas
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
class wait_for_pending_tasks(object):
@@ -108,17 +109,17 @@
def make_nsr(ns_instance_config_ref=str(uuid.uuid4())):
- nsr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr()
nsr.ns_instance_config_ref = ns_instance_config_ref
return nsr
def make_vnfr(id=str(uuid.uuid4())):
- vnfr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ vnfr = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
vnfr.id = id
return vnfr
def make_vdur(id=str(uuid.uuid4()), vim_id=str(uuid.uuid4())):
- vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.id = id
vdur.vim_id = vim_id
return vdur
@@ -130,7 +131,7 @@
return True
def nfvi_metrics(self, account, vim_id):
- metrics = RwmonYang.NfviMetrics()
+ metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics()
metrics.vcpu.utilization = 0.5
return metrics
@@ -138,7 +139,7 @@
self.loop = asyncio.new_event_loop()
self.logger = logging.getLogger('test-logger')
- self.account = RwcalYang.CloudAccount(
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(
name='test-cloud-account',
account_type="mock",
)
@@ -149,7 +150,7 @@
mock = self.plugin_manager.plugin(self.account.name)
mock.set_impl(TestNfviMetricsCache.Plugin())
- self.vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ self.vdur = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
self.vdur.id = "test-vdur-id"
self.vdur.vim_id = "test-vim-id"
self.vdur.vm_flavor.vcpu_count = 4
@@ -207,13 +208,13 @@
return True
def nfvi_metrics(self, account, vim_id):
- metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+ metrics = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
metrics.vcpu.utilization = 0.5
return None, metrics
def setUp(self):
self.loop = asyncio.new_event_loop()
- self.account = RwcalYang.CloudAccount(
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(
name='test-cloud-account',
account_type="mock",
)
@@ -287,7 +288,7 @@
self._alarms = set()
def nfvi_metrics(self, account, vm_id):
- return rwmon.NfviMetrics()
+ return rwmon.YangData_RwProject_Project_NfviMetrics()
def nfvi_metrics_available(self, account):
return True
@@ -305,7 +306,7 @@
self.loop = asyncio.new_event_loop()
self.logger = logging.getLogger('test-logger')
- self.account = RwcalYang.CloudAccount(
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(
name='test-cloud-account',
account_type="mock",
)
@@ -339,8 +340,9 @@
def test_retrieve(self):
pass
+ @unittest.skip("Alarms are being disabled in monitor")
def test_alarm_create_and_destroy(self):
- alarm = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_Alarms()
+ alarm = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_Alarms()
alarm.name = "test-alarm"
alarm.description = "test-description"
alarm.vdur_id = "test-vdur-id"
@@ -401,7 +403,7 @@
# return a VCPU utilization of 0.5.
class MockPlugin(object):
def __init__(self):
- self.metrics = RwmonYang.NfviMetrics()
+ self.metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics()
def nfvi_metrics(self, account, vim_id):
self.metrics.vcpu.utilization = 0.5
@@ -410,7 +412,7 @@
self.loop = asyncio.get_event_loop()
self.logger = logging.getLogger('test-logger')
- self.account = RwcalYang.CloudAccount(
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(
name='test-cloud-account',
account_type="mock",
)
@@ -485,7 +487,7 @@
def setUp(self):
self.logger = logging.getLogger('test-logger')
self.plugins = NfviMetricsPluginManager(self.logger)
- self.account = RwcalYang.CloudAccount(
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(
name='test-cloud-account',
account_type="mock",
)
@@ -553,10 +555,11 @@
self.loop = asyncio.get_event_loop()
self.logger = logging.getLogger('test-logger')
+ self.project = ManoProject(self.logger, name=DEFAULT_PROJECT)
self.config = InstanceConfiguration()
- self.monitor = Monitor(self.loop, self.logger, self.config)
+ self.monitor = Monitor(self.loop, self.logger, self.config, self.project)
- self.account = RwcalYang.CloudAccount(
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(
name='test-cloud-account',
account_type="mock",
)
@@ -606,8 +609,8 @@
self.monitor.add_cloud_account(self.account)
# Create a VNFR associated with the cloud account
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
- vnfr.cloud_account = self.account.name
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
+ vnfr.datacenter = self.account.name
vnfr.id = 'test-vnfr-id'
# Add a VDUR to the VNFR
@@ -644,7 +647,7 @@
to retrieve the NFVI metrics associated with the VDU.
"""
# Define the VDUR to be registered
- vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.vm_flavor.vcpu_count = 4
vdur.vm_flavor.memory_mb = 100
vdur.vm_flavor.storage_gb = 2
@@ -680,12 +683,12 @@
the VDURs contained in the VNFR are unregistered.
"""
# Define the VDUR to be registered
- vdur = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.vim_id = 'test-vim-id-1'
vdur.id = 'test-vdur-id-1'
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
- vnfr.cloud_account = self.account.name
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
+ vnfr.datacenter = self.account.name
vnfr.id = 'test-vnfr-id'
vnfr.vdur.append(vdur)
@@ -699,7 +702,7 @@
# Add another VDUR to the VNFR and update the monitor. Both VDURs
# should now be registered
- vdur = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.vim_id = 'test-vim-id-2'
vdur.id = 'test-vdur-id-2'
@@ -730,8 +733,8 @@
Monitor.
"""
# Create the VNFR
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
- vnfr.cloud_account = self.account.name
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
+ vnfr.datacenter = self.account.name
vnfr.id = 'test-vnfr-id'
# Create 2 VDURs
@@ -752,8 +755,8 @@
class MockPlugin(object):
def __init__(self):
self._metrics = dict()
- self._metrics['test-vim-id-1'] = RwmonYang.NfviMetrics()
- self._metrics['test-vim-id-2'] = RwmonYang.NfviMetrics()
+ self._metrics['test-vim-id-1'] = RwmonYang.YangData_RwProject_Project_NfviMetrics()
+ self._metrics['test-vim-id-2'] = RwmonYang.YangData_RwProject_Project_NfviMetrics()
def nfvi_metrics(self, account, vim_id):
metrics = self._metrics[vim_id]
diff --git a/rwlaunchpad/test/utest_rwnsm.py b/rwlaunchpad/test/utest_rwnsm.py
index e125739..48b4ff2 100755
--- a/rwlaunchpad/test/utest_rwnsm.py
+++ b/rwlaunchpad/test/utest_rwnsm.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-17 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -25,15 +25,29 @@
import uuid
import xmlrunner
+import gi
+gi.require_version('ProjectNsdYang', '1.0')
+gi.require_version('NsrYang', '1.0')
+
+#Setting RIFT_VAR_ROOT if not already set for unit test execution
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
+
from gi.repository import (
- NsdYang,
- NsrYang,
- )
+ ProjectNsdYang,
+ NsrYang,
+)
+
logger = logging.getLogger('test-rwnsmtasklet')
import rift.tasklets.rwnsmtasklet.rwnsmtasklet as rwnsmtasklet
import rift.tasklets.rwnsmtasklet.xpath as rwxpath
+from rift.mano.utils.project import ManoProject
+
+
+def prefix_project(xpath):
+ return "/rw-project:project" + xpath
class TestGiXpath(unittest.TestCase):
def setUp(self):
@@ -46,26 +60,27 @@
"""
# Create the initial NSD catalog
- nsd_catalog = NsdYang.YangData_Nsd_NsdCatalog()
+ nsd_catalog = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog()
# Create an NSD, set its 'id', and add it to the catalog
nsd_id = str(uuid.uuid4())
nsd_catalog.nsd.append(
- NsdYang.YangData_Nsd_NsdCatalog_Nsd(
+ ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd(
id=nsd_id,
)
)
# Retrieve the NSD using and xpath expression
- xpath = '/nsd:nsd-catalog/nsd:nsd[nsd:id={}]'.format(nsd_id)
+ xpath = prefix_project('/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id={}]'.
+ format(nsd_id))
nsd = rwxpath.getxattr(nsd_catalog, xpath)
self.assertEqual(nsd_id, nsd.id)
# Modified the name of the NSD using an xpath expression
- rwxpath.setxattr(nsd_catalog, xpath + "/nsd:name", "test-name")
+ rwxpath.setxattr(nsd_catalog, xpath + "/project-nsd:name", "test-name")
- name = rwxpath.getxattr(nsd_catalog, xpath + "/nsd:name")
+ name = rwxpath.getxattr(nsd_catalog, xpath + "/project-nsd:name")
self.assertEqual("test-name", name)
def test_nsd_scalar_fields(self):
@@ -74,24 +89,27 @@
"""
# Define a simple NSD
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ nsd = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
+
+ xpath = prefix_project('/project-nsd:nsd-catalog/project-nsd:nsd')
# Check that the unset fields are in fact set to None
- self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name"))
- self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
+ self.assertEqual(None, rwxpath.getxattr(nsd, xpath + "/project-nsd:name"))
+ self.assertEqual(None, rwxpath.getxattr(nsd, xpath + "/project-nsd:short-name"))
# Set the values of the 'name' and 'short-name' fields
- rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name", "test-name")
- rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name", "test-short-name")
+ rwxpath.setxattr(nsd, xpath + "/project-nsd:name", "test-name")
+ rwxpath.setxattr(nsd, xpath + "/project-nsd:short-name", "test-short-name")
# Check that the 'name' and 'short-name' fields are correctly set
- self.assertEqual(nsd.name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name"))
- self.assertEqual(nsd.short_name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
+ self.assertEqual(nsd.name, rwxpath.getxattr(nsd, xpath + "/project-nsd:name"))
+ self.assertEqual(nsd.short_name, rwxpath.getxattr(nsd, xpath + "/project-nsd:short-name"))
class TestInputParameterSubstitution(unittest.TestCase):
def setUp(self):
- self.substitute_input_parameters = rwnsmtasklet.InputParameterSubstitution(logger)
+ project = ManoProject(logger)
+ self.substitute_input_parameters = rwnsmtasklet.InputParameterSubstitution(logger, project)
def test_null_arguments(self):
"""
@@ -99,8 +117,8 @@
config, no exception should be raised.
"""
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
- nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsd = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
+ nsr_config = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
self.substitute_input_parameters(None, None)
self.substitute_input_parameters(nsd, None)
@@ -115,26 +133,26 @@
"""
# Define the original NSD
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ nsd = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
nsd.name = "robert"
nsd.short_name = "bob"
# Define which parameters may be modified
nsd.input_parameter_xpath.append(
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
label="NSD Name",
)
)
# Define the input parameters that are intended to be modified
- nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr_config = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr_config.input_parameter.extend([
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
value="alice",
),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
value="alice",
),
@@ -153,30 +171,30 @@
"""
# Define the original NSD
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
- nsd.name = "robert"
- nsd.short_name = "bob"
+ nsd = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
+ # nsd.name = "robert"
+ # nsd.short_name = "bob"
# Define which parameters may be modified
nsd.input_parameter_xpath.extend([
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
label="NSD Name",
),
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
label="NSD Short Name",
),
])
# Define the input parameters that are intended to be modified
- nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr_config = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr_config.input_parameter.extend([
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
value="robert",
),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
value="bob",
),
diff --git a/rwlaunchpad/test/utest_scaling_rpc.py b/rwlaunchpad/test/utest_scaling_rpc.py
index b2290af..ac25676 100644
--- a/rwlaunchpad/test/utest_scaling_rpc.py
+++ b/rwlaunchpad/test/utest_scaling_rpc.py
@@ -16,19 +16,18 @@
# limitations under the License.
#
-
+import argparse
import asyncio
+import gi
+import logging
import os
import sys
+import time
+import types
import unittest
import uuid
import xmlrunner
-import argparse
-import logging
-import time
-import types
-import gi
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwDts', '1.0')
gi.require_version('RwNsmYang', '1.0')
@@ -51,6 +50,8 @@
RwConfigAgentYang as rwcfg_agent,
RwlogMgmtYang
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
from gi.repository.RwTypes import RwStatus
import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
@@ -92,7 +93,7 @@
"""
Creates an object for class RwcalYang.Clo
"""
- account = rwcloudyang.CloudAccount()
+ account = rwcloudyang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
if account_type == 'mock':
account.name = account_name
account.account_type = "mock"
@@ -110,7 +111,7 @@
@asyncio.coroutine
def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"):
account = self.get_cal_account(cloud_type, cloud_name)
- account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name)
+ account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name={}]".format(quoted_key(cloud_name))
self.log.info("Configuring cloud-account: %s", account)
yield from dts.query_create(account_xpath,
rwdts.XactFlag.ADVISE,